source: trunk/kernel/kern/process.c @ 662

Last change on this file: revision 662, checked in by alain, 4 years ago

Introduce the ksocket.h & ksocket.c files in kernel/kern.

File size: 94.0 KB
1/*
2 * process.c - process related functions definition.
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner (2016,2017,2018,2019,2020)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <hal_vmm.h>
32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
42#include <chdev.h>
43#include <list.h>
44#include <string.h>
45#include <scheduler.h>
46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
57#include <syscalls.h>
58#include <shared_syscalls.h>
59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
71/////////////////////////////////
72process_t * process_alloc( void )
73{
74        kmem_req_t req;
75
76    req.type  = KMEM_KCM;
77        req.order = bits_log2( sizeof(process_t) );
78        req.flags = AF_KERNEL;
79
80    return kmem_alloc( &req );
81}
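// Typical usage (sketch, mirroring process_get_local_copy() below): the caller
// must check for allocation failure, then call one of the init functions,
// e.g. process_copy_init() or process_reference_init(), before using the
// descriptor:
//
//     process_t * p = process_alloc();
//     if( p == NULL ) return NULL;
//     if( process_copy_init( p , ref_xp ) ) return NULL;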
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_KCM;
89        req.ptr  = process;
90        kmem_free( &req );
91}
92
93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
97{
98    error_t     error;
99    xptr_t      process_xp;
100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
111    xptr_t      file_xp;
112    xptr_t      chdev_xp;
113    chdev_t   * chdev_ptr;
114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
116    vmm_t     * vmm;
117
118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
121    // get pointer on process vmm
122    vmm = &process->vmm;
123
124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
126    parent_ptr = GET_PTR( parent_xp );
127
128    // get parent_pid
129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
130
131#if DEBUG_PROCESS_REFERENCE_INIT
132thread_t * this = CURRENT_THREAD;
133uint32_t cycle = (uint32_t)hal_get_cycles();
134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
137#endif
138
139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
142    process->owner_xp   = XPTR( local_cxy , process );
143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
145
146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
153
154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
168    // initialize VSL lock
169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
171    // register kernel vsegs in user process VMM as required by the architecture
172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
194 
195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
196cycle = (uint32_t)hal_get_cycles();
197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
200#endif
201
202    // initialize fd_array as empty
203    process_fd_init( process );
204
205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
207    {
208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
211
212        // attach process to TXT
213        process_txt_attach( process , txt_id ); 
214
215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
220#endif
221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
226        error = vfs_open(  process->vfs_root_xp,
227                           rx_path,
228                           process_xp,
229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdin pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
238
239assert( (stdin_id == 0) , "stdin index must be 0" );
240
241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
246#endif
247
248        // create stdout pseudo file         
249        error = vfs_open(  process->vfs_root_xp,
250                           tx_path,
251                           process_xp,
252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
261
262assert( (stdout_id == 1) , "stdout index must be 1" );
263
264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
269#endif
270
271        // create stderr pseudo file         
272        error = vfs_open(  process->vfs_root_xp,
273                           tx_path,
274                           process_xp,
275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
284
285assert( (stderr_id == 2) , "stderr index must be 2" );
286
287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
292#endif
293
294    }
295    else                                            // normal user process
296    {
297        // get extended pointer on stdin pseudo file in parent process
298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
300
301        // get extended pointer on parent process TXT chdev
302        chdev_xp = chdev_from_file( file_xp );
303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
306        chdev_ptr = GET_PTR( chdev_xp );
307 
308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
310
311        // attach child process to parent process TXT terminal
312        process_txt_attach( process , txt_id ); 
313
314        // copy all open files from parent process fd_array to this process
315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
317    }
318
319    // initialize lock protecting CWD changes
320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
322
323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
324cycle = (uint32_t)hal_get_cycles();
325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
328#endif
329
330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
335
336    // reset semaphore / mutex / barrier / condvar list roots and lock
337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
343
344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
348
349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
352
353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
355
356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
358
359    // initialize th_tbl[] array and associated threads
360    uint32_t i;
361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
368
369        hal_fence();
370
371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
372cycle = (uint32_t)hal_get_cycles();
373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
376#endif
377
378#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
379hal_vmm_display( parent_xp , false );
380hal_vmm_display( XPTR( local_cxy , process ) , false );
381#endif
382
383    return 0;
384
385}  // process_reference_init()
386
387/////////////////////////////////////////////////////
388error_t process_copy_init( process_t * local_process,
389                           xptr_t      reference_process_xp )
390{
391    error_t   error;
392    vmm_t   * vmm;
393
394    // get reference process cluster and local pointer
395    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
396    process_t * ref_ptr = GET_PTR( reference_process_xp );
397
398    // get pointer on process vmm
399    vmm = &local_process->vmm;
400
401    // initialize PID, REF_XP, PARENT_XP, and STATE
402    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
403    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
404    local_process->ref_xp     = reference_process_xp;
405    local_process->owner_xp   = reference_process_xp;
406    local_process->term_state = 0;
407
408#if DEBUG_PROCESS_COPY_INIT
409thread_t * this = CURRENT_THREAD; 
410uint32_t cycle = (uint32_t)hal_get_cycles();
411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
414#endif
415
416// check user process
417assert( (local_process->pid != 0), "LPID cannot be 0" );
418
419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
422
423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
425    if( error ) 
426    {
427        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
428        return -1;
429    }
430
431    // initialize GPT and VSL locks
432        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
433
434    // register kernel vsegs in VMM as required by the architecture
435    error = hal_vmm_kernel_update( local_process );
436    if( error ) 
437    {
438        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
439        return -1;
440    }
441
442    // create "args" and "envs" vsegs
443    // create "stacks" and "mmap" vsegs allocators
444    // initialize locks protecting GPT and VSL
445    error = vmm_user_init( local_process );
446    if( error ) 
447    {
448        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
449        return -1;
450    }
451 
452#if (DEBUG_PROCESS_COPY_INIT & 1)
453cycle = (uint32_t)hal_get_cycles();
454if( DEBUG_PROCESS_COPY_INIT < cycle )
455printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
456__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
457#endif
458
459    // set process file descriptors array
460        process_fd_init( local_process );
461
462    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
463    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
464    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
465    local_process->cwd_xp      = XPTR_NULL;
466
467    // reset children list root (not used in a process descriptor copy)
468    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
469    local_process->children_nr   = 0;
470    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
471                           LOCK_PROCESS_CHILDREN );
472
473    // reset children_list (not used in a process descriptor copy)
474    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
475
476    // reset semaphores list root (not used in a process descriptor copy)
477    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
478    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
479    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
480    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
481
482    // initialize th_tbl[] array and associated fields
483    uint32_t i;
484    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
485        {
486        local_process->th_tbl[i] = NULL;
487    }
488    local_process->th_nr  = 0;
489    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
490
491    // register new process descriptor in local cluster manager local_list
492    cluster_process_local_link( local_process );
493
494    // register new process descriptor in owner cluster manager copies_list
495    cluster_process_copies_link( local_process );
496
497        hal_fence();
498
499#if DEBUG_PROCESS_COPY_INIT
500cycle = (uint32_t)hal_get_cycles();
501if( DEBUG_PROCESS_COPY_INIT < cycle )
502printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
503__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
504#endif
505
506    return 0;
507
508} // end process_copy_init()
509
510///////////////////////////////////////////
511void process_destroy( process_t * process )
512{
513    xptr_t      parent_xp;
514    process_t * parent_ptr;
515    cxy_t       parent_cxy;
516    xptr_t      children_lock_xp;
517    xptr_t      children_nr_xp;
518
519    pid_t       pid = process->pid;
520
521// check no more threads
522assert( (process->th_nr == 0),
523"process %x in cluster %x contains threads", pid , local_cxy );
524
525#if DEBUG_PROCESS_DESTROY
526thread_t * this = CURRENT_THREAD;
527uint32_t cycle = (uint32_t)hal_get_cycles();
528if( DEBUG_PROCESS_DESTROY < cycle )
529printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
530__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
531#endif
532
533    // Destroy VMM
534    vmm_destroy( process );
535
536#if (DEBUG_PROCESS_DESTROY & 1)
537if( DEBUG_PROCESS_DESTROY < cycle )
538printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
539__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
540#endif
541
542    // remove process from local_list in local cluster manager
543    cluster_process_local_unlink( process );
544
545#if (DEBUG_PROCESS_DESTROY & 1)
546if( DEBUG_PROCESS_DESTROY < cycle )
547printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
548__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
549#endif
550
551    // remove process from copies_list in owner cluster manager
552    cluster_process_copies_unlink( process );
553
554#if (DEBUG_PROCESS_DESTROY & 1)
555if( DEBUG_PROCESS_DESTROY < cycle )
556printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
557__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
558#endif
559
560    // when target process cluster is the owner cluster
561    // - remove process from TXT list and transfer ownership
562    // - remove process from children_list
563    // - release PID
564    if( CXY_FROM_PID( pid ) == local_cxy )
565    {
566        process_txt_detach( XPTR( local_cxy , process ) );
567
568#if (DEBUG_PROCESS_DESTROY & 1)
569if( DEBUG_PROCESS_DESTROY < cycle )
570printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
571__FUNCTION__, this->process->pid, this->trdid, pid );
572#endif
573
574        // get pointers on parent process
575        parent_xp  = process->parent_xp;
576        parent_cxy = GET_CXY( parent_xp );
577        parent_ptr = GET_PTR( parent_xp );
578
579        // get extended pointer on children_lock in parent process
580        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
581        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
582
583        // remove process from children_list
584        remote_queuelock_acquire( children_lock_xp );
585        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
586            hal_remote_atomic_add( children_nr_xp , -1 );
587        remote_queuelock_release( children_lock_xp );
588
589#if (DEBUG_PROCESS_DESTROY & 1)
590if( DEBUG_PROCESS_DESTROY < cycle )
591printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
592__FUNCTION__, this->process->pid, this->trdid, pid );
593#endif
594
595        // release the process PID to cluster manager
596        cluster_pid_release( pid );
597
598#if (DEBUG_PROCESS_DESTROY & 1)
599if( DEBUG_PROCESS_DESTROY < cycle )
600printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
601__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
602#endif
603
604    }
605
606    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
607
608    // FIXME close all open files [AG]
609
610    // FIXME synchronize dirty files [AG]
611
612    // release memory allocated to process descriptor
613    process_free( process );
614
615#if DEBUG_PROCESS_DESTROY
616cycle = (uint32_t)hal_get_cycles();
617if( DEBUG_PROCESS_DESTROY < cycle )
618printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
619__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
620#endif
621
622}  // end process_destroy()
623
624///////////////////////////////////////////////////////////////////
625const char * process_action_str( process_sigactions_t action_type )
626{
627    switch ( action_type )
628    {
629        case BLOCK_ALL_THREADS:   return "BLOCK";
630        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
631        case DELETE_ALL_THREADS:  return "DELETE";
632        default:                  return "undefined";
633    }
634}
635
636////////////////////////////////////////
637void process_sigaction( pid_t       pid,
638                        uint32_t    type )
639{
640    cxy_t              owner_cxy;         // owner cluster identifier
641    lpid_t             lpid;              // process index in owner cluster
642    cluster_t        * cluster;           // pointer on cluster manager
643    xptr_t             root_xp;           // extended pointer on root of copies
644    xptr_t             lock_xp;           // extended pointer on lock protecting copies
645    xptr_t             iter_xp;           // iterator on copies list
646    xptr_t             process_xp;        // extended pointer on process copy
647    cxy_t              process_cxy;       // process copy cluster identifier
648    process_t        * process_ptr;       // local pointer on process copy
649    reg_t              save_sr;           // for critical section
650    thread_t         * client;            // pointer on client thread
651    xptr_t             client_xp;         // extended pointer on client thread
652    process_t        * local;             // pointer on process copy in local cluster
653    uint32_t           remote_nr;         // number of remote process copies
654    rpc_desc_t         rpc;               // shared RPC descriptor
655    uint32_t           responses;         // shared RPC responses counter
656
657    client    = CURRENT_THREAD;
658    client_xp = XPTR( local_cxy , client );
659    local     = NULL;
660    remote_nr = 0;
661
662    // check calling thread can yield
663    thread_assert_can_yield( client , __FUNCTION__ );
664
665#if DEBUG_PROCESS_SIGACTION
666uint32_t cycle = (uint32_t)hal_get_cycles();
667if( DEBUG_PROCESS_SIGACTION < cycle )
668printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
669__FUNCTION__ , client->process->pid, client->trdid,
670process_action_str( type ) , pid , cycle );
671#endif
672
673    // get pointer on local cluster manager
674    cluster = LOCAL_CLUSTER;
675
676    // get owner cluster identifier and process lpid
677    owner_cxy = CXY_FROM_PID( pid );
678    lpid      = LPID_FROM_PID( pid );
679
680    // get root of list of copies and lock from owner cluster
681    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
682    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
683
684// check action type
685assert( ((type == DELETE_ALL_THREADS ) ||
686         (type == BLOCK_ALL_THREADS )  ||
687         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
688             
689    // The client thread sends parallel RPCs to all remote clusters containing
690    // copies of the target process, waits for all responses, and then directly
691    // handles the threads in the local cluster, when required.
692    // The client thread allocates a - shared - RPC descriptor on its stack,
693    // because all parallel, non-blocking server threads use the same input
694    // arguments and the same shared RPC response counter.
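    // Completion protocol (summary of the code below): the "responses" counter
    // is incremented once per RPC sent; the last server thread to respond is
    // expected to unblock the client, which deschedules when remote copies
    // exist, or unblocks itself when the only copy is the local one.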
695
696    // mask IRQs
697    hal_disable_irq( &save_sr);
698
699    // client thread blocks itself
700    thread_block( client_xp , THREAD_BLOCKED_RPC );
701
702    // initialize RPC responses counter
703    responses = 0;
704
705    // initialize shared RPC descriptor
706    // can be shared, because no out arguments
707    rpc.rsp       = &responses;
708    rpc.blocking  = false;
709    rpc.index     = RPC_PROCESS_SIGACTION;
710    rpc.thread    = client;
711    rpc.lid       = client->core->lid;
712    rpc.args[0]   = pid;
713    rpc.args[1]   = type;
714
715    // take the lock protecting process copies
716    remote_queuelock_acquire( lock_xp );
717
718    // scan list of process copies
719    XLIST_FOREACH( root_xp , iter_xp )
720    {
721        // get extended pointers and cluster on process
722        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
723        process_cxy = GET_CXY( process_xp );
724        process_ptr = GET_PTR( process_xp );
725
726        if( process_cxy == local_cxy )    // process copy is local
727        { 
728            local = process_ptr;
729        }
730        else                              // process copy is remote
731        {
732            // update number of remote process copies
733            remote_nr++;
734
735            // atomically increment RPC responses counter
736            hal_atomic_add( &responses , 1 );
737
738#if DEBUG_PROCESS_SIGACTION
739if( DEBUG_PROCESS_SIGACTION < cycle )
740printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
741__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
742#endif
743            // call RPC in target cluster
744            rpc_send( process_cxy , &rpc );
745        }
746    }  // end list of copies
747
748    // release the lock protecting process copies
749    remote_queuelock_release( lock_xp );
750
751    // restore IRQs
752    hal_restore_irq( save_sr);
753
754    // - if there are remote process copies, the client thread deschedules
755    //   (it will be unblocked by the last RPC server thread).
756    // - if there are no remote copies, the client thread unblocks itself.
757    if( remote_nr )
758    {
759        sched_yield("blocked on rpc_process_sigaction");
760    } 
761    else
762    {
763        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
764    }
765
766    // handle the local process copy if required
767    if( local != NULL )
768    {
769
770#if DEBUG_PROCESS_SIGACTION
771if( DEBUG_PROCESS_SIGACTION < cycle )
772printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
773__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
774#endif
775        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
776        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
777        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
778    }
779
780#if DEBUG_PROCESS_SIGACTION
781cycle = (uint32_t)hal_get_cycles();
782if( DEBUG_PROCESS_SIGACTION < cycle )
783printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
784__FUNCTION__, client->process->pid, client->trdid,
785process_action_str( type ), pid, cycle );
786#endif
787
788}  // end process_sigaction()
789
790/////////////////////////////////////////////////
791void process_block_threads( process_t * process )
792{
793    thread_t          * target;         // pointer on target thread
794    thread_t          * this;           // pointer on calling thread
795    uint32_t            ltid;           // index in process th_tbl[]
796    uint32_t            count;          // requests counter
797    volatile uint32_t   ack_count;      // acknowledges counter
798
799    // get calling thread pointer
800    this = CURRENT_THREAD;
801
802#if DEBUG_PROCESS_SIGACTION
803pid_t pid = process->pid;
804uint32_t cycle = (uint32_t)hal_get_cycles();
805if( DEBUG_PROCESS_SIGACTION < cycle )
806printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
807__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
808#endif
809
810// check target process is a user process
811assert( (LPID_FROM_PID( process->pid ) != 0 ),
812"process %x is not a user process\n", process->pid );
813
814    // get lock protecting process th_tbl[]
815    rwlock_rd_acquire( &process->th_lock );
816
817    // loop on target process local threads
818    // we use both "ltid" and "count" because th_tbl[] can contain holes
819    // - if the calling thread and the target thread are not running on the same
820    //   core, we ask the target scheduler to acknowledge the blocking,
821    //   to be sure that the target thread is not running.
822    // - if the calling thread and the target thread are running on the same core,
823    //   we don't need confirmation from the scheduler.
824           
825    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
826    {
827        target = process->th_tbl[ltid];
828
829        if( target != NULL )                                 // thread exist
830        {
831            count++;
832
833            // set the global blocked bit in target thread descriptor.
834            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
835 
836            if( this->core->lid != target->core->lid )
837            {
838                // increment acknowledgement counter
839                hal_atomic_add( (void*)&ack_count , 1 );
840
841                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
842                thread_set_req_ack( target , (uint32_t *)&ack_count );
843
844                // force scheduling on target thread
845                dev_pic_send_ipi( local_cxy , target->core->lid );
846            }
847        }
848    }
849
850    // release lock protecting process th_tbl[]
851    rwlock_rd_release( &process->th_lock );
852
853    // wait for all scheduler acknowledgements  TODO this could be improved...
854    while( 1 )
855    {
856        // exit when all scheduler acknowledgements have been received
857        if ( ack_count == 0 ) break;
858   
859        // wait 1000 cycles before retry
860        hal_fixed_delay( 1000 );
861    }
862
863#if DEBUG_PROCESS_SIGACTION
864cycle = (uint32_t)hal_get_cycles();
865if( DEBUG_PROCESS_SIGACTION < cycle )
866printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
867__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
868#endif
869
870}  // end process_block_threads()
871
872/////////////////////////////////////////////////
873void process_delete_threads( process_t * process,
874                             xptr_t      client_xp )
875{
876    thread_t          * target;        // local pointer on target thread
877    xptr_t              target_xp;     // extended pointer on target thread
878    cxy_t               owner_cxy;     // owner process cluster
879    uint32_t            ltid;          // index in process th_tbl
880    uint32_t            count;         // threads counter
881
882
883
884    // get target process owner cluster
885    owner_cxy = CXY_FROM_PID( process->pid );
886
887#if DEBUG_PROCESS_SIGACTION
888thread_t * this  = CURRENT_THREAD;
889uint32_t   cycle = (uint32_t)hal_get_cycles();
890if( DEBUG_PROCESS_SIGACTION < cycle )
891printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
892__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
893#endif
894
895// check target process is a user process
896assert( (LPID_FROM_PID( process->pid ) != 0),
897"process %x is not a user process\n", process->pid );
898
899    // get lock protecting process th_tbl[]
900    rwlock_wr_acquire( &process->th_lock );
901
902    // loop on target process local threads                       
903    // we use both "ltid" and "count" because th_tbl[] can contain holes
904    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
905    {
906        target = process->th_tbl[ltid];
907
908        if( target != NULL )    // valid thread 
909        {
910            count++;
911            target_xp = XPTR( local_cxy , target );
912
913            // main thread and client thread should not be deleted
914            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
915                (client_xp) != target_xp )                           // not client thread
916            {
917                // mark target thread for delete and block it
918                thread_delete( target_xp , true );                   // forced
919            }
920        }
921    }
922
923    // release lock protecting process th_tbl[]
924    rwlock_wr_release( &process->th_lock );
925
926#if DEBUG_PROCESS_SIGACTION
927cycle = (uint32_t)hal_get_cycles();
928if( DEBUG_PROCESS_SIGACTION < cycle )
929printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
930__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
931#endif
932
933}  // end process_delete_threads()
934
935///////////////////////////////////////////////////
936void process_unblock_threads( process_t * process )
937{
938    thread_t          * target;        // pointer on target thread
939    uint32_t            ltid;          // index in process th_tbl
940    uint32_t            count;         // requests counter
941
942#if DEBUG_PROCESS_SIGACTION
943thread_t * this  = CURRENT_THREAD;
944pid_t      pid   = process->pid;
945uint32_t   cycle = (uint32_t)hal_get_cycles();
946if( DEBUG_PROCESS_SIGACTION < cycle )
947printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
948__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
949#endif
950
951// check target process is a user process
952assert( ( LPID_FROM_PID( process->pid ) != 0 ),
953"process %x is not a user process\n", process->pid );
954
955    // get lock protecting process th_tbl[]
956    rwlock_rd_acquire( &process->th_lock );
957
958    // loop on process threads to unblock all threads
959    // we use both "ltid" and "count" because th_tbl[] can contain holes
960    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
961    {
962        target = process->th_tbl[ltid];
963
964        if( target != NULL )             // thread found
965        {
966            count++;
967
968            // reset the global blocked bit in target thread descriptor.
969            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
970        }
971    }
972
973    // release lock protecting process th_tbl[]
974    rwlock_rd_release( &process->th_lock );
975
976#if DEBUG_PROCESS_SIGACTION
977cycle = (uint32_t)hal_get_cycles();
978if( DEBUG_PROCESS_SIGACTION < cycle )
979printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
980__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
981#endif
982
983}  // end process_unblock_threads()
984
985///////////////////////////////////////////////
986process_t * process_get_local_copy( pid_t pid )
987{
988    error_t        error;
989    process_t    * process_ptr;   // local pointer on process
990    xptr_t         process_xp;    // extended pointer on process
991
992    cluster_t * cluster = LOCAL_CLUSTER;
993
994#if DEBUG_PROCESS_GET_LOCAL_COPY
995thread_t * this = CURRENT_THREAD;
996uint32_t cycle = (uint32_t)hal_get_cycles();
997if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
998printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
999__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
1000#endif
1001
1002    // get lock protecting local list of processes
1003    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1004
1005    // scan the local list of process descriptors to find the process
1006    xptr_t  iter;
1007    bool_t  found = false;
1008    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
1009    {
1010        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1011        process_ptr = GET_PTR( process_xp );
1012        if( process_ptr->pid == pid )
1013        {
1014            found = true;
1015            break;
1016        }
1017    }
1018
1019    // release lock protecting local list of processes
1020    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1021
1022    // allocate memory for a new local process descriptor
1023    // and initialise it from reference cluster if not found
1024    if( !found )
1025    {
1026        // get extended pointer on reference process descriptor
1027        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
1028
1029        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
1030
1031        // allocate memory for local process descriptor
1032        process_ptr = process_alloc();
1033
1034        if( process_ptr == NULL )  return NULL;
1035
1036        // initialize local process descriptor copy
1037        error = process_copy_init( process_ptr , ref_xp );
1038
1039        if( error ) return NULL;
1040    }
1041
1042#if DEBUG_PROCESS_GET_LOCAL_COPY
1043cycle = (uint32_t)hal_get_cycles();
1044if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1045printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
1046__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
1047#endif
1048
1049    return process_ptr;
1050
1051}  // end process_get_local_copy()
1052
1053////////////////////////////////////////////
1054pid_t process_get_ppid( xptr_t  process_xp )
1055{
1056    cxy_t       process_cxy;
1057    process_t * process_ptr;
1058    xptr_t      parent_xp;
1059    cxy_t       parent_cxy;
1060    process_t * parent_ptr;
1061
1062    // get process cluster and local pointer
1063    process_cxy = GET_CXY( process_xp );
1064    process_ptr = GET_PTR( process_xp );
1065
1066    // get pointers on parent process
1067    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1068    parent_cxy = GET_CXY( parent_xp );
1069    parent_ptr = GET_PTR( parent_xp );
1070
1071    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1072}
1073
1074//////////////////////////////////////////////////////////////////////////////////////////
1075// File descriptor array related functions
1076//////////////////////////////////////////////////////////////////////////////////////////
1077
1078///////////////////////////////////////////
1079char * process_fd_type_str( uint32_t type )
1080{
1081    switch( type )
1082    {
1083        case INODE_TYPE_FILE : return "FILE";
1084        case INODE_TYPE_DIR  : return "DIR";
1085        case INODE_TYPE_FIFO : return "FIFO";
1086        case INODE_TYPE_PIPE : return "PIPE";
1087        case INODE_TYPE_SOCK : return "SOCK";
1088        case INODE_TYPE_DEV  : return "DEV";
1089        case INODE_TYPE_BLK  : return "BLK";
1090        case INODE_TYPE_SYML : return "SYML";
1091       
1092        default              : return "undefined";
1093    }
1094}
1095   
1096///////////////////////////////////////////
1097void process_fd_init( process_t * process )
1098{
1099    uint32_t fd;
1100
1101    // initialize lock
1102    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
1103
1104    // initialize number of open files
1105    process->fd_array.max = 0;
1106
1107    // initialize array
1108    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1109    {
1110        process->fd_array.array[fd] = XPTR_NULL;
1111    }
1112}
1113
1114////////////////////////////////////////////////////
1115error_t process_fd_register( xptr_t      process_xp,
1116                             xptr_t      file_xp,
1117                             uint32_t  * fdid )
1118{
1119    bool_t    found;
1120    uint32_t  id;
1121    uint32_t  max;             // current value of max non-free slot index
1122    xptr_t    entry_xp;        // current value of one fd_array entry
1123    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1124    xptr_t    max_xp;          // extended pointer on max field in fd_array
1125
1126    // get target process cluster and local pointer
1127    process_t * process_ptr = GET_PTR( process_xp );
1128    cxy_t       process_cxy = GET_CXY( process_xp );
1129
1130// check target process is owner process
1131assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1132"process must be owner process\n" );
1133
1134#if DEBUG_PROCESS_FD_REGISTER
1135thread_t * this  = CURRENT_THREAD;
1136uint32_t   cycle = (uint32_t)hal_get_cycles();
1137pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1138if( DEBUG_PROCESS_FD_REGISTER < cycle )
1139printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1140__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1141#endif
1142
1143    // build extended pointers on lock & max
1144    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1145    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
1146
1147    // take lock protecting reference fd_array
1148        remote_queuelock_acquire( lock_xp );
1149
1150    found   = false;
1151
1152    // get current value of max_fdid
1153    max = hal_remote_l32( max_xp );
1154
1155    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
1156    {
1157        // get fd_array entry
1158        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1159       
1160        if ( entry_xp == XPTR_NULL )
1161        {
1162            // update  fd_array
1163            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1164
1165            // update max when required
1166            if( id > max ) hal_remote_s32( max_xp , id );
1167
1168            // increase file refcount
1169            vfs_file_count_up( file_xp );
1170
1171            // exit loop
1172                        *fdid = id;
1173            found = true;
1174            break;
1175        }
1176    }
1177
1178    // release lock protecting fd_array
1179        remote_queuelock_release( lock_xp );
1180
1181#if DEBUG_PROCESS_FD_REGISTER
1182cycle = (uint32_t)hal_get_cycles();
1183if( DEBUG_PROCESS_FD_REGISTER < cycle )
1184printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1185__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1186#endif
1187
1188    if ( !found ) return -1;
1189    else          return 0;
1190
1191}  // end process_fd_register()
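// Usage sketch (assumption: callers such as vfs_open() publish a new open file
// this way; the error handling below is illustrative, not taken from this file):
//
//     uint32_t fdid;
//     if( process_fd_register( process->owner_xp , file_xp , &fdid ) )
//     {
//         // fd_array is full : release the file and report the error
//     }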
1192
1193/////////////////////////////////////////////
1194void process_fd_remove( xptr_t    process_xp,
1195                        uint32_t  fdid )
1196{
1197    pid_t       pid;           // target process PID
1198    lpid_t      lpid;          // target process LPID
1199    xptr_t      file_xp;       // extended pointer on file descriptor
1200    xptr_t      iter_xp;       // iterator for list of process copies
1201    xptr_t      copy_xp;       // extended pointer on process copy
1202    process_t * copy_ptr;      // local pointer on process copy 
1203    cxy_t       copy_cxy;      // process copy cluster identifier
1204
1205    // get target process cluster and local pointer
1206    process_t * process_ptr = GET_PTR( process_xp );
1207    cxy_t       process_cxy = GET_CXY( process_xp );
1208
1209// check target process is owner process
1210assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1211"process must be owner process\n" );
1212
1213    // get target process pid and lpid
1214    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1215    lpid = LPID_FROM_PID( pid );
1216
1217#if DEBUG_PROCESS_FD_REMOVE
1218uint32_t    cycle = (uint32_t)hal_get_cycles();
1219thread_t  * this  = CURRENT_THREAD;
1220if( DEBUG_PROCESS_FD_REMOVE < cycle )
1221printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
1222__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1223#endif
1224
1225    // get extended pointer on file descriptor
1226    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1227
1228    // build extended pointers on list_of_copies root and lock (in owner cluster)
1229    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
1230    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
1231 
1232    // build extended pointer on fd_array lock and max
1233    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1234    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
1235
1236    // take lock protecting fd_array
1237        remote_queuelock_acquire( fd_lock_xp );
1238
1239    // take the lock protecting the list of copies
1240    remote_queuelock_acquire( copies_lock_xp );
1241
1242    // get max value
1243    uint32_t max = hal_remote_l32( fd_max_xp );
1244
1245    // loop on list of process copies
1246    XLIST_FOREACH( copies_root_xp , iter_xp )
1247    {
1248        // get pointers on process copy
1249        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
1250        copy_ptr = GET_PTR( copy_xp );
1251        copy_cxy = GET_CXY( copy_xp );
1252
1253        // release the fd_array entry in process copy
1254        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
1255
1256        // decrease file refcount
1257        vfs_file_count_down( file_xp );
1258    }
1259
1260    // update max when required
1261    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );
1262
1263    // release the lock protecting reference fd_array
1264        remote_queuelock_release( fd_lock_xp );
1265
1266    // release the lock protecting the list of copies
1267    remote_queuelock_release( copies_lock_xp );
1268
1269#if DEBUG_PROCESS_FD_REMOVE
1270cycle = (uint32_t)hal_get_cycles();
1271if( DEBUG_PROCESS_FD_REMOVE < cycle )
1272printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
1273__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1274#endif
1275
1276}  // end process_fd_remove()
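// Note: the fd_array entry is cleared in every process copy, and the file
// refcount is decremented once per copy, which matches the per-copy increment
// done by process_fd_remote_copy() and process_fd_get_xptr_from_local().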
1277
1278//////////////////////////////////////////////
1279void process_fd_clean_all( xptr_t process_xp )
1280{
1281    uint32_t  id;
1282    xptr_t    file_xp;         // one fd_array entry
1283    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1284    uint32_t  max;             // max non-free slot index in fd_array
1285    error_t   error;
1286
1287    // get process cluster, local pointer and PID
1288    process_t * process_ptr = GET_PTR( process_xp );
1289    cxy_t       process_cxy = GET_CXY( process_xp );
1290    pid_t       pid         = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1291
1292// check target process is owner process
1293assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
1294"process must be owner process\n" );
1295
1296#if DEBUG_PROCESS_FD_CLEAN_ALL
1297thread_t * this  = CURRENT_THREAD;
1298uint32_t   cycle = (uint32_t)hal_get_cycles();
1299if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1300printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1301__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1302
1303process_fd_display( process_xp );
1304#endif
1305
1306    // build extended pointer on lock protecting the fd_array
1307    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1308
1309    // get max index for fd_array
1310    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1311
1312    // take lock protecting fd_array
1313        remote_queuelock_acquire( lock_xp );
1314
1315    for ( id = 0; id <= max ; id++ )
1316    {
1317        // get fd_array entry
1318        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1319       
1320        if ( file_xp != XPTR_NULL )
1321        {
1322            // close the file or socket
1323            error = sys_close( id );
1324
1325            if( error )
1326            printk("\n[ERROR] in %s : cannot close the file %d for process %x\n",
1327            __FUNCTION__, id, pid );
1328        }
1329    }
1330
1331    // release lock protecting fd_array
1332        remote_queuelock_release( lock_xp );
1333
1334#if DEBUG_PROCESS_FD_CLEAN_ALL
1335cycle = (uint32_t)hal_get_cycles();
1336if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1337printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1338__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1339#endif
1340
1341}  // end process_fd_clean_all()
1342
1343//////////////////////////////////////////////////////////////
1344xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1345                                       uint32_t    fdid )
1346{
1347    cxy_t       process_cxy = GET_CXY( process_xp );
1348    process_t * process_ptr = GET_PTR( process_xp );
1349
1350assert( (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
1351"process_xp argument must be the owner process" );
1352
1353    // access owner process fd_array
1354    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1355
1356}  // end process_fd_get_xptr_from_owner()
1357
1358///////////////////////////////////////////////////////////
1359xptr_t process_fd_get_xptr_from_local( process_t * process,
1360                                       uint32_t    fdid )
1361{
1362    xptr_t  file_xp;
1363    xptr_t  lock_xp;
1364
1365    // access local copy of process descriptor
1366    file_xp = process->fd_array.array[fdid];
1367
1368    if( file_xp == XPTR_NULL )
1369    {
1370        // get owner process cluster and local pointer
1371        xptr_t      owner_xp  = process->owner_xp;
1372        cxy_t       owner_cxy = GET_CXY( owner_xp );
1373        process_t * owner_ptr = GET_PTR( owner_xp );
1374
1375        // build extended pointer on lock protecting fd_array
1376        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );
1377
1378        // take lock protecting fd_array
1379            remote_queuelock_acquire( lock_xp );
1380
1381        // access reference process descriptor
1382        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );
1383
1384        if( file_xp != XPTR_NULL ) 
1385        {
1386           // update local fd_array
1387            process->fd_array.array[fdid] = file_xp;
1388       
1389            // increase file refcount
1390            vfs_file_count_up( file_xp );
1391        }
1392
1393        // release lock protecting fd_array
1394            remote_queuelock_release( lock_xp );
1395    }
1396
1397    return file_xp;
1398
1399}  // end process_fd_get_xptr_from_local()
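// Note: the local fd_array therefore acts as a lazily-filled cache of the owner
// process fd_array : a miss fetches the entry from the owner cluster, takes a
// reference on the file, and caches the extended pointer locally.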
1400
1401///////////////////////////////////////////
1402void process_fd_remote_copy( xptr_t dst_xp,
1403                             xptr_t src_xp )
1404{
1405    uint32_t fd;
1406    xptr_t   entry;
1407
1408    // get cluster and local pointer for src fd_array
1409    cxy_t        src_cxy = GET_CXY( src_xp );
1410    fd_array_t * src_ptr = GET_PTR( src_xp );
1411
1412    // get cluster and local pointer for dst fd_array
1413    cxy_t        dst_cxy = GET_CXY( dst_xp );
1414    fd_array_t * dst_ptr = GET_PTR( dst_xp );
1415
1416    // get the remote lock protecting the src fd_array
1417        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
1418
1419    // loop on all fd_array entries
1420    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1421        {
1422                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
1423
1424                if( entry != XPTR_NULL )
1425                {
1426            // increment file descriptor refcount
1427            vfs_file_count_up( entry );
1428
1429                        // copy entry in destination process fd_array
1430                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
1431                }
1432        }
1433
1434    // release lock on source process fd_array
1435        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
1436
1437}  // end process_fd_remote_copy()
1438
1439
1440////////////////////////////////////
1441bool_t process_fd_array_full( void )
1442{
1443    // get extended pointer on owner process
1444    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
1445
1446    // get owner process cluster and local pointer
1447    process_t * owner_ptr = GET_PTR( owner_xp );
1448    cxy_t       owner_cxy = GET_CXY( owner_xp );
1449
1450    // get number of open file descriptors from  fd_array
1451    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1452
1453        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
1454}
1455
1456////////////////////////////////////////////
1457void process_fd_display( xptr_t process_xp )
1458{
1459    uint32_t      fdid;
1460    xptr_t        file_xp;
1461    vfs_file_t *  file_ptr;
1462    cxy_t         file_cxy;
1463    uint32_t      file_type;
1464    xptr_t        inode_xp;
1465    vfs_inode_t * inode_ptr;
1466
1467    char          name[CONFIG_VFS_MAX_NAME_LENGTH];
1468
1469    // get process cluster and local pointer
1470    process_t * process_ptr = GET_PTR( process_xp );
1471    cxy_t       process_cxy = GET_CXY( process_xp );
1472
1473    // get process PID
1474    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));
1475
1476    // get pointers on owner process descriptor
1477    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
1478    process_t * owner_ptr = GET_PTR( owner_xp );
1479    cxy_t       owner_cxy = GET_CXY( owner_xp );
1480
1481    // get max fdid from owner process descriptor
1482    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1483
1484    printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
1485    pid, process_cxy, max );
1486
1487    for( fdid = 0 ; fdid <= max ; fdid++ )
1488    {
1489        // get pointers on file descriptor
1490        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1491        file_ptr = GET_PTR( file_xp );
1492        file_cxy = GET_CXY( file_xp );
1493
1494        if( file_xp != XPTR_NULL )
1495        {
1496            // get file type
1497            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type )); 
1498
1499            // get file name for a true file
1500            if( file_type == INODE_TYPE_FILE )
1501            {
1502                // get inode pointers
1503                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
1504                inode_xp  = XPTR( file_cxy , inode_ptr );
1505
1506                // get file name
1507                vfs_inode_get_name( inode_xp , name );
1508
1509                // display relevant file descriptor info
1510                printk(" - %d : type %s (%s)\n",
1511                fdid , process_fd_type_str(file_type), name );
1512            }
1513            else
1514            {
1515                // display relevant file descriptor info
1516                printk(" - %d : type %s\n",
1517                fdid , process_fd_type_str(file_type) );
1518            }
1519        }
1520        else
1521        {
1522            // display relevant file descriptor info
1523            printk(" - %d : empty slot\n",
1524            fdid );
1525        }
1526    }
1527}   // end process_fd_display()
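
// Illustrative sketch (not part of the original source): dumping the fd_array
// of the calling process, for instance from a debug syscall. The helper name
// is hypothetical; process_fd_display() only needs an extended pointer.
static void example_dump_my_fd_array( void )
{
    process_t * process = CURRENT_THREAD->process;

    // build an extended pointer on the local process descriptor
    process_fd_display( XPTR( local_cxy , process ) );
}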
1528
1529////////////////////////////////////////////////////////////////////////////////////
1530//  Thread related functions
1531////////////////////////////////////////////////////////////////////////////////////
1532
1533/////////////////////////////////////////////////////
1534error_t process_register_thread( process_t * process,
1535                                 thread_t  * thread,
1536                                 trdid_t   * trdid )
1537{
1538    ltid_t         ltid;
1539    bool_t         found = false;
1540 
1541// check arguments
1542assert( (process != NULL) , "process argument is NULL" );
1543assert( (thread != NULL) , "thread argument is NULL" );
1544
1545    // take the lock protecting th_tbl for all threads
1546    // except the idle thread executing kernel_init, which cannot yield
1547    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1548
1549    // scan th_tbl
1550    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1551    {
1552        if( process->th_tbl[ltid] == NULL )
1553        {
1554            found = true;
1555            break;
1556        }
1557    }
1558
1559    if( found )
1560    {
1561        // register thread in th_tbl[]
1562        process->th_tbl[ltid] = thread;
1563        process->th_nr++;
1564
1565        // returns trdid
1566        *trdid = TRDID( local_cxy , ltid );
1567    }
1568
1569    // release the lock protecting th_tbl
1570    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1571
1572    return (found) ? 0 : 0xFFFFFFFF;
1573
1574}  // end process_register_thread()
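
// Illustrative sketch (not part of the original source): a typical thread
// creation path registers the new thread in its process descriptor and aborts
// when the local th_tbl[] is full. example_attach_thread() is hypothetical.
static error_t example_attach_thread( process_t * process,
                                      thread_t  * thread )
{
    trdid_t trdid;

    // try to get a free ltid slot in the local th_tbl[]
    if( process_register_thread( process , thread , &trdid ) )
    {
        printk("\n[ERROR] in %s : too many threads in cluster %x\n",
        __FUNCTION__, local_cxy );
        return -1;
    }

    // the returned trdid encodes both the owner cluster and the local index
    thread->trdid = trdid;

    return 0;
}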
1575
1576///////////////////////////////////////////////////
1577uint32_t process_remove_thread( thread_t * thread )
1578{
1579    uint32_t count;  // number of threads in local process descriptor
1580
1581// check thread
1582assert( (thread != NULL) , "thread argument is NULL" );
1583
1584    process_t * process = thread->process;
1585
1586    // get thread local index
1587    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1588   
1589    // get the lock protecting th_tbl[]
1590    rwlock_wr_acquire( &process->th_lock );
1591
1592    // get number of threads
1593    count = process->th_nr;
1594
1595// check th_nr value
1596assert( (count > 0) , "process th_nr cannot be 0" );
1597
1598    // remove thread from th_tbl[]
1599    process->th_tbl[ltid] = NULL;
1600    process->th_nr = count-1;
1601
1602    // release lock protecting th_tbl
1603    rwlock_wr_release( &process->th_lock );
1604
1605    return count;
1606
1607}  // end process_remove_thread()
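
// Illustrative sketch (not part of the original source): a thread destruction
// path can use the value returned by process_remove_thread() to detect the
// removal of the last thread. The helper below is hypothetical and much
// simpler than the actual destruction path, which must also release the PID,
// the VMM, and the fd_array.
static void example_detach_thread( thread_t * thread )
{
    process_t * process = thread->process;

    // returned value is the number of registered threads before removal
    uint32_t count = process_remove_thread( thread );

    // only the last registered thread releases the local process descriptor
    if( count == 1 ) process_free( process );
}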
1608
1609/////////////////////////////////////////////////////////
1610error_t process_make_fork( xptr_t      parent_process_xp,
1611                           xptr_t      parent_thread_xp,
1612                           pid_t     * child_pid,
1613                           thread_t ** child_thread )
1614{
1615    process_t * process;         // local pointer on child process descriptor
1616    thread_t  * thread;          // local pointer on child thread descriptor
1617    pid_t       new_pid;         // process identifier for child process
1618    pid_t       parent_pid;      // process identifier for parent process
1619    xptr_t      ref_xp;          // extended pointer on reference process
1620    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
1621    error_t     error;
1622
1623    // get cluster and local pointer for parent process
1624    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
1625    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
1626
1627    // get parent process PID and extended pointer on .elf file
1628    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1629    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1630
1631    // get extended pointer on reference process
1632    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
1633
1634// check parent process is the reference process
1635assert( (parent_process_xp == ref_xp ) ,
1636"parent process must be the reference process" );
1637
1638#if DEBUG_PROCESS_MAKE_FORK
1639uint32_t   cycle;
1640thread_t * this  = CURRENT_THREAD;
1641trdid_t    trdid = this->trdid;
1642pid_t      pid   = this->process->pid;
1643#endif
1644
1645#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1646cycle   = (uint32_t)hal_get_cycles();
1647if( DEBUG_PROCESS_MAKE_FORK < cycle )
1648printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
1649__FUNCTION__, pid, trdid, local_cxy, cycle );
1650#endif
1651
1652    // allocate a process descriptor
1653    process = process_alloc();
1654
1655    if( process == NULL )
1656    {
1657        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1658        __FUNCTION__, local_cxy ); 
1659        return -1;
1660    }
1661
1662    // allocate a child PID from local cluster
1663    error = cluster_pid_alloc( process , &new_pid );
1664    if( error ) 
1665    {
1666        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1667        __FUNCTION__, local_cxy ); 
1668        process_free( process );
1669        return -1;
1670    }
1671
1672#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1673cycle = (uint32_t)hal_get_cycles();
1674if( DEBUG_PROCESS_MAKE_FORK < cycle )
1675printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
1676__FUNCTION__, pid, trdid, new_pid, cycle );
1677#endif
1678
1679    // initialize the child process descriptor from the parent process descriptor
1680    error = process_reference_init( process,
1681                                    new_pid,
1682                                    parent_process_xp );
1683    if( error ) 
1684    {
1685        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1686        __FUNCTION__, local_cxy ); 
1687        process_free( process );
1688        return -1;
1689    }
1690
1691#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1692cycle = (uint32_t)hal_get_cycles();
1693if( DEBUG_PROCESS_MAKE_FORK < cycle )
1694printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
1695__FUNCTION__, pid, trdid, new_pid, cycle );
1696#endif
1697
1698    // copy VMM from parent descriptor to child descriptor
1699    error = vmm_fork_copy( process,
1700                           parent_process_xp );
1701    if( error )
1702    {
1703        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1704        __FUNCTION__, local_cxy ); 
1705        process_free( process );
1706        cluster_pid_release( new_pid );
1707        return -1;
1708    }
1709
1710#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1711cycle = (uint32_t)hal_get_cycles();
1712if( DEBUG_PROCESS_MAKE_FORK < cycle )
1713printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
1714__FUNCTION__, pid, trdid, cycle );
1715hal_vmm_display( XPTR( local_cxy , process ) , true );
1716#endif
1717
1718    // if parent_process is INIT, or if parent_process is the TXT owner,
1719    // the child_process becomes the owner of its TXT terminal
1720    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
1721    {
1722        process_txt_set_ownership( XPTR( local_cxy , process ) );
1723
1724#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1725cycle = (uint32_t)hal_get_cycles();
1726if( DEBUG_PROCESS_MAKE_FORK < cycle )
1727printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
1728__FUNCTION__ , pid, trdid, new_pid, cycle );
1729#endif
1730
1731    }
1732
1733    // update extended pointer on .elf file
1734    process->vfs_bin_xp = vfs_bin_xp;
1735
1736    // create child thread descriptor from parent thread descriptor
1737    error = thread_user_fork( parent_thread_xp,
1738                              process,
1739                              &thread );
1740    if( error )
1741    {
1742        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1743        __FUNCTION__, local_cxy ); 
1744        process_free( process );
1745        cluster_pid_release( new_pid );
1746        return -1;
1747    }
1748
1749// check main thread LTID
1750assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1751"main thread must have LTID == 0" );
1752
1753#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1754cycle = (uint32_t)hal_get_cycles();
1755if( DEBUG_PROCESS_MAKE_FORK < cycle )
1756printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
1757__FUNCTION__, pid, trdid, thread, cycle );
1758#endif
1759
1760    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
1761    // this includes all parent process copies in all clusters
1762    if( parent_process_cxy == local_cxy )   // reference is local
1763    {
1764        vmm_set_cow( parent_process_ptr );
1765    }
1766    else                                    // reference is remote
1767    {
1768        rpc_vmm_set_cow_client( parent_process_cxy,
1769                                parent_process_ptr );
1770    }
1771
1772    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
1773    vmm_set_cow( process );
1774 
1775#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1776cycle = (uint32_t)hal_get_cycles();
1777if( DEBUG_PROCESS_MAKE_FORK < cycle )
1778printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
1779__FUNCTION__, pid, trdid, cycle );
1780#endif
1781
1782    // get extended pointers on parent children_root, children_lock and children_nr
1783    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1784    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1785    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
1786
1787    // register process in parent children list
1788    remote_queuelock_acquire( children_lock_xp );
1789        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1790        hal_remote_atomic_add( children_nr_xp , 1 );
1791    remote_queuelock_release( children_lock_xp );
1792
1793    // return success
1794    *child_thread = thread;
1795    *child_pid    = new_pid;
1796
1797#if DEBUG_PROCESS_MAKE_FORK
1798cycle = (uint32_t)hal_get_cycles();
1799if( DEBUG_PROCESS_MAKE_FORK < cycle )
1800printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
1801__FUNCTION__, pid, trdid, new_pid, cycle );
1802#endif
1803
1804    return 0;
1805
1806}   // end process_make_fork()
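
// Illustrative sketch (not part of the original source): how a fork() syscall
// could call process_make_fork() and then activate the new main thread,
// assuming the calling thread already runs in the parent reference cluster
// (otherwise an RPC to that cluster is required). example_do_fork() is
// hypothetical and omits most error handling.
static error_t example_do_fork( void )
{
    thread_t * this = CURRENT_THREAD;
    pid_t      child_pid;
    thread_t * child_thread;

    error_t error = process_make_fork( this->process->ref_xp,
                                       XPTR( local_cxy , this ),
                                       &child_pid,
                                       &child_thread );
    if( error ) return -1;

    // make the child main thread runnable
    thread_unblock( XPTR( local_cxy , child_thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}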
1807
1808/////////////////////////////////////////////////////
1809error_t process_make_exec( exec_info_t  * exec_info )
1810{
1811    thread_t       * thread;                  // local pointer on this thread
1812    process_t      * process;                 // local pointer on this process
1813    pid_t            pid;                     // this process identifier
1814    xptr_t           ref_xp;                  // reference process for this process
1815        error_t          error;                   // value returned by called functions
1816    char           * path;                    // path to .elf file
1817    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1818    uint32_t         file_id;                 // file index in fd_array
1819    uint32_t         args_nr;                 // number of main thread arguments
1820    char          ** args_pointers;           // array of pointers on main thread arguments
1821
1822    // get calling thread, process, pid and ref_xp
1823    thread  = CURRENT_THREAD;
1824    process = thread->process;
1825    pid     = process->pid;
1826    ref_xp  = process->ref_xp;
1827
1828        // get relevant infos from exec_info
1829        path          = exec_info->path;
1830    args_nr       = exec_info->args_nr;
1831    args_pointers = exec_info->args_pointers;
1832
1833#if DEBUG_PROCESS_MAKE_EXEC
1834uint32_t cycle = (uint32_t)hal_get_cycles();
1835if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1836printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
1837__FUNCTION__, pid, thread->trdid, path, cycle );
1838#endif
1839
1840    // open the file identified by <path>
1841    file_xp = XPTR_NULL;
1842    file_id = 0xFFFFFFFF;
1843        error   = vfs_open( process->vfs_root_xp,
1844                            path,
1845                        ref_xp,
1846                            O_RDONLY,
1847                            0,
1848                            &file_xp,
1849                            &file_id );
1850        if( error )
1851        {
1852                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1853                return -1;
1854        }
1855
1856#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1857cycle = (uint32_t)hal_get_cycles();
1858if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1859printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
1860__FUNCTION__, pid, thread->trdid, path, cycle );
1861#endif
1862
1863    // delete all threads other than this main thread in all clusters
1864    process_sigaction( pid , DELETE_ALL_THREADS );
1865
1866#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1867cycle = (uint32_t)hal_get_cycles();
1868if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1869printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
1870__FUNCTION__, pid, thread->trdid, cycle );
1871#endif
1872
1873    // reset calling process VMM
1874    vmm_user_reset( process );
1875
1876#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1877cycle = (uint32_t)hal_get_cycles();
1878if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1879printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
1880__FUNCTION__, pid, thread->trdid, cycle );
1881#endif
1882
1883    // re-initialize the VMM (args/envs vsegs registration)
1884    error = vmm_user_init( process );
1885    if( error )
1886    {
1887        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1888        vfs_close( file_xp , file_id );
1889        // FIXME restore old process VMM [AG]
1890        return -1;
1891    }
1892   
1893#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1894cycle = (uint32_t)hal_get_cycles();
1895if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1896printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
1897__FUNCTION__, pid, thread->trdid, cycle );
1898#endif
1899
1900    // register code & data vsegs as well as entry-point in process VMM,
1901    // and register extended pointer on .elf file in process descriptor
1902        error = elf_load_process( file_xp , process );
1903    if( error )
1904        {
1905                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
1906        vfs_close( file_xp , file_id );
1907        // FIXME restore old process VMM [AG]
1908        return -1;
1909        }
1910
1911#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1912cycle = (uint32_t)hal_get_cycles();
1913if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1914printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
1915__FUNCTION__, pid, thread->trdid, cycle );
1916#endif
1917
1918    // update the existing main thread descriptor... and jump to user code
1919    error = thread_user_exec( (void *)process->vmm.entry_point,
1920                              args_nr,
1921                              args_pointers );
1922    if( error )
1923    {
1924        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
1925        vfs_close( file_xp , file_id );
1926        // FIXME restore old process VMM
1927        return -1;
1928    }
1929
1930    assert( false, "we should not execute this code");
1931 
1932        return 0;
1933
1934}  // end process_make_exec()
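
// Illustrative sketch (not part of the original source): process_make_exec()
// is expected to be called with an exec_info_t already filled from user space
// (path, args_nr, args_pointers are the only fields used above). On success
// the call does not return, since the main thread jumps to the new user code.
static void example_call_exec( exec_info_t * info )
{
    if( process_make_exec( info ) )
    {
        // only the failure path returns to the caller
        printk("\n[ERROR] in %s : exec failed for <%s>\n",
        __FUNCTION__, info->path );
    }
}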
1935
1936
1937////////////////////////////////////////////////
1938void process_zero_create( process_t   * process,
1939                          boot_info_t * info )
1940{
1941    error_t error;
1942    pid_t   pid;
1943
1944#if DEBUG_PROCESS_ZERO_CREATE
1945uint32_t cycle = (uint32_t)hal_get_cycles();
1946if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1947printk("\n[%s] enter / cluster %x / cycle %d\n",
1948__FUNCTION__, local_cxy, cycle );
1949#endif
1950
1951    // get pointer on VMM
1952    vmm_t * vmm = &process->vmm;
1953
1954    // get PID from local cluster manager for this kernel process
1955    error = cluster_pid_alloc( process , &pid );
1956
1957    if( error || (LPID_FROM_PID( pid ) != 0) )
1958    {
1959        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1960        __FUNCTION__ , local_cxy, pid );
1961        hal_core_sleep();
1962    }
1963
1964#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1965if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1966printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
1967#endif
1968
1969    // initialize PID, REF_XP, PARENT_XP, and STATE
1970    // the kernel process_zero is its own parent_process,
1971    // reference_process, and owner_process, and cannot be killed...
1972    process->pid        = pid;
1973    process->ref_xp     = XPTR( local_cxy , process );
1974    process->owner_xp   = XPTR( local_cxy , process );
1975    process->parent_xp  = XPTR( local_cxy , process );
1976    process->term_state = 0;
1977
1978    // initialize VSL as empty
1979    vmm->vsegs_nr = 0;
1980        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
1981
1982#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1983if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1984printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
1985#endif
1986
1987    // initialize GPT as empty
1988    error = hal_gpt_create( &vmm->gpt );
1989
1990    if( error ) 
1991    {
1992        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1993        hal_core_sleep();
1994    }
1995
1996#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1997if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1998printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
1999#endif
2000
2001    // initialize VSL and GPT locks
2002    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
2003   
2004    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2005    error = hal_vmm_kernel_init( info );
2006
2007    if( error ) 
2008    {
2009        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2010        hal_core_sleep();
2011    }
2012
2013#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2014if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2015printk("\n[%s] initialized hal-specific VMM in cluster %x\n", __FUNCTION__, local_cxy );
2016#endif
2017
2018    // reset th_tbl[] array and associated fields
2019    uint32_t i;
2020    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
2021        {
2022        process->th_tbl[i] = NULL;
2023    }
2024    process->th_nr  = 0;
2025    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
2026
2027#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2028if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2029printk("\n[%s] initialized th_tbl[] in cluster %x\n", __FUNCTION__, local_cxy );
2030#endif
2031
2032    // reset children list as empty
2033    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2034    process->children_nr = 0;
2035    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2036                           LOCK_PROCESS_CHILDREN );
2037
2038#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2039if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2040printk("\n[%s] initialized children list in cluster %x\n", __FUNCTION__, local_cxy );
2041#endif
2042
2043    // register kernel process in cluster manager local_list
2044    cluster_process_local_link( process );
2045   
2046        hal_fence();
2047
2048#if DEBUG_PROCESS_ZERO_CREATE
2049cycle = (uint32_t)hal_get_cycles();
2050if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2051printk("\n[%s] exit / cluster %x / cycle %d\n",
2052__FUNCTION__, local_cxy, cycle );
2053#endif
2054
2055}  // end process_zero_create()
2056
2057////////////////////////////////
2058void process_init_create( void )
2059{
2060    process_t      * process;       // local pointer on process descriptor
2061    pid_t            pid;           // process_init identifier
2062    thread_t       * thread;        // local pointer on main thread
2063    pthread_attr_t   attr;          // main thread attributes
2064    lid_t            lid;           // selected core local index for main thread
2065    xptr_t           file_xp;       // extended pointer on .elf file descriptor
2066    uint32_t         file_id;       // file index in fd_array
2067    error_t          error;
2068
2069#if DEBUG_PROCESS_INIT_CREATE
2070thread_t * this = CURRENT_THREAD;
2071uint32_t cycle = (uint32_t)hal_get_cycles();
2072if( DEBUG_PROCESS_INIT_CREATE < cycle )
2073printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
2074__FUNCTION__, this->process->pid, this->trdid, cycle );
2075#endif
2076
2077    // allocates memory for process descriptor from local cluster
2078        process = process_alloc(); 
2079    if( process == NULL )
2080    {
2081        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
2082        hal_core_sleep();
2083    }
2084
2085    // set the CWD and VFS_ROOT fields in process descriptor
2086    process->cwd_xp      = process_zero.vfs_root_xp;
2087    process->vfs_root_xp = process_zero.vfs_root_xp;
2088
2089    // get PID from local cluster
2090    error = cluster_pid_alloc( process , &pid );
2091    if( error ) 
2092    {
2093        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
2094        hal_core_sleep();
2095    }
2096    if( pid != 1 ) 
2097    {
2098        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
2099        hal_core_sleep();
2100    }
2101
2102    // initialize process descriptor / parent is local process_zero
2103    error = process_reference_init( process,
2104                                    pid,
2105                                    XPTR( local_cxy , &process_zero ) ); 
2106    if( error )
2107    {
2108        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
2109        hal_core_sleep();
2110    }
2111
2112#if(DEBUG_PROCESS_INIT_CREATE & 1)
2113if( DEBUG_PROCESS_INIT_CREATE < cycle )
2114printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
2115__FUNCTION__, this->process->pid, this->trdid );
2116#endif
2117
2118    // open the file identified by CONFIG_PROCESS_INIT_PATH
2119    file_xp = XPTR_NULL;
2120    file_id = -1;
2121        error   = vfs_open( process->vfs_root_xp,
2122                            CONFIG_PROCESS_INIT_PATH,
2123                        XPTR( local_cxy , process ),
2124                            O_RDONLY,
2125                            0,
2126                            &file_xp,
2127                            &file_id );
2128    if( error )
2129    {
2130        printk("\n[PANIC] in %s : cannot open file <%s>\n",
2131         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2132        hal_core_sleep();
2133    }
2134
2135#if(DEBUG_PROCESS_INIT_CREATE & 1)
2136if( DEBUG_PROCESS_INIT_CREATE < cycle )
2137printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
2138__FUNCTION__, this->process->pid, this->trdid );
2139#endif
2140
2141    // register "code" and "data" vsegs as well as entry-point
2142    // in process VMM, using information contained in the elf file.
2143        error = elf_load_process( file_xp , process );
2144
2145    if( error ) 
2146    {
2147        printk("\n[PANIC] in %s : cannot access file <%s>\n",
2148         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2149        hal_core_sleep();
2150    }
2151
2152
2153#if(DEBUG_PROCESS_INIT_CREATE & 1)
2154if( DEBUG_PROCESS_INIT_CREATE < cycle )
2155printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
2156__FUNCTION__, this->process->pid, this->trdid );
2157#endif
2158
2159#if (DEBUG_PROCESS_INIT_CREATE & 1)
2160hal_vmm_display( XPTR( local_cxy , process ) , true );
2161#endif
2162
2163    // get extended pointers on process_zero children_root, children_lock
2164    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
2165    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
2166
2167    // take lock protecting kernel process children list
2168    remote_queuelock_acquire( children_lock_xp );
2169
2170    // register process INIT in parent local process_zero
2171        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
2172        hal_atomic_add( &process_zero.children_nr , 1 );
2173
2174    // release lock protecting kernel process children list
2175    remote_queuelock_release( children_lock_xp );
2176
2177#if(DEBUG_PROCESS_INIT_CREATE & 1)
2178if( DEBUG_PROCESS_INIT_CREATE < cycle )
2179printk("\n[%s] thread[%x,%x] registered init process in parent\n",
2180__FUNCTION__, this->process->pid, this->trdid );
2181#endif
2182
2183    // select a core in local cluster to execute the main thread
2184    lid  = cluster_select_local_core( local_cxy );
2185
2186    // initialize pthread attributes for main thread
2187    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
2188    attr.cxy        = local_cxy;
2189    attr.lid        = lid;
2190
2191    // create and initialize thread descriptor
2192        error = thread_user_create( pid,
2193                                (void *)process->vmm.entry_point,
2194                                NULL,
2195                                &attr,
2196                                &thread );
2197
2198    if( error )
2199    {
2200        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
2201        hal_core_sleep();
2202    }
2203    if( thread->trdid != 0 )
2204    {
2205        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
2206        hal_core_sleep();
2207    }
2208
2209#if(DEBUG_PROCESS_INIT_CREATE & 1)
2210if( DEBUG_PROCESS_INIT_CREATE < cycle )
2211printk("\n[%s] thread[%x,%x] created main thread\n",
2212__FUNCTION__, this->process->pid, this->trdid );
2213#endif
2214
2215    // activate thread
2216        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
2217
2218    hal_fence();
2219
2220#if DEBUG_PROCESS_INIT_CREATE
2221cycle = (uint32_t)hal_get_cycles();
2222if( DEBUG_PROCESS_INIT_CREATE < cycle )
2223printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
2224__FUNCTION__, this->process->pid, this->trdid, cycle );
2225#endif
2226
2227}  // end process_init_create()
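
// Illustrative sketch (not part of the original source): the boot-time call
// order suggested by the two functions above. Each cluster creates its local
// kernel process_zero, and the user "init" process is then created by
// process_init_create(). The exact call sites (in kernel_init.c) and the
// single-cluster restriction for init are assumptions.
static void example_boot_sequence( boot_info_t * info )
{
    // per-cluster kernel process (PID 0)
    process_zero_create( &process_zero , info );

    // first user process (PID 1), executing CONFIG_PROCESS_INIT_PATH
    process_init_create();
}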
2228
2229/////////////////////////////////////////
2230void process_display( xptr_t process_xp )
2231{
2232    process_t   * process_ptr;
2233    cxy_t         process_cxy;
2234
2235    xptr_t        parent_xp;       // extended pointer on parent process
2236    process_t   * parent_ptr;
2237    cxy_t         parent_cxy;
2238
2239    xptr_t        owner_xp;        // extended pointer on owner process
2240    process_t   * owner_ptr;
2241    cxy_t         owner_cxy;
2242
2243    pid_t         pid;
2244    pid_t         ppid;
2245    lpid_t        lpid;
2246    uint32_t      state;
2247    uint32_t      th_nr;
2248
2249    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
2250    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
2251    chdev_t     * txt_chdev_ptr;
2252    cxy_t         txt_chdev_cxy;
2253    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
2254
2255    xptr_t        elf_file_xp;     // extended pointer on .elf file
2256    cxy_t         elf_file_cxy;
2257    vfs_file_t  * elf_file_ptr;
2258    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
2259
2260    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
2261    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
2262
2263    // get cluster and local pointer on process
2264    process_ptr = GET_PTR( process_xp );
2265    process_cxy = GET_CXY( process_xp );
2266
2267    // get process PID, LPID, and state
2268    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2269    lpid  = LPID_FROM_PID( pid );
2270    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
2271
2272    // get process PPID
2273    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
2274    parent_cxy = GET_CXY( parent_xp );
2275    parent_ptr = GET_PTR( parent_xp );
2276    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
2277
2278    // get number of threads
2279    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
2280
2281    // get pointers on owner process descriptor
2282    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
2283    owner_cxy = GET_CXY( owner_xp );
2284    owner_ptr = GET_PTR( owner_xp );
2285
2286    // get process TXT name and .elf name
2287    if( lpid )                                   // user process
2288    {
2289
2290        // get extended pointer on file descriptor associated to TXT_RX
2291        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
2292
2293        assert( (txt_file_xp != XPTR_NULL) ,
2294        "process must be attached to one TXT terminal" ); 
2295
2296        // get TXT_RX chdev pointers
2297        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2298        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2299        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2300
2301        // get TXT_RX name and ownership
2302        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2303                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
2304   
2305        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2306                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
2307
2308        // get process .elf name
2309        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2310        elf_file_cxy  = GET_CXY( elf_file_xp );
2311        elf_file_ptr  = GET_PTR( elf_file_xp );
2312        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2313        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2314    }
2315    else                                         // kernel process_zero
2316    {
2317        // TXT name and .elf name are not registered in kernel process_zero
2318        strcpy( txt_name , "txt0_rx" );
2319        txt_owner_xp = process_xp; 
2320        strcpy( elf_name , "kernel.elf" );
2321    }
2322
2323    // display process info
2324    if( txt_owner_xp == process_xp )
2325    {
2326        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2327        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2328    }
2329    else
2330    {
2331        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2332        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2333    }
2334}  // end process_display()
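
// Illustrative sketch (not part of the original source): process_display()
// uses nolock_printk(), so a caller is expected to hold the TXT0 busylock,
// as process_txt_display() does further below. The helper name is hypothetical.
static void example_display_one_process( xptr_t process_xp )
{
    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // take the TXT0 lock to get an atomic display
    remote_busylock_acquire( XPTR( txt0_cxy , &txt0_ptr->wait_lock ) );

    process_display( process_xp );

    remote_busylock_release( XPTR( txt0_cxy , &txt0_ptr->wait_lock ) );
}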
2335
2336
2337////////////////////////////////////////////////////////////////////////////////////////
2338//     Terminals related functions
2339////////////////////////////////////////////////////////////////////////////////////////
2340
2341//////////////////////////////////
2342uint32_t process_txt_alloc( void )
2343{
2344    uint32_t  index;       // TXT terminal index
2345    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2346    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2347    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2348    xptr_t    root_xp;     // extended pointer on owner field in chdev
2349
2350    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2351    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2352    {
2353        // get pointers on TXT_RX[index]
2354        chdev_xp  = chdev_dir.txt_rx[index];
2355        chdev_cxy = GET_CXY( chdev_xp );
2356        chdev_ptr = GET_PTR( chdev_xp );
2357
2358        // get extended pointer on root of attached process
2359        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2360
2361        // return free TXT index if found
2362        if( xlist_is_empty( root_xp ) ) return index; 
2363    }
2364
2365    assert( false , "no free TXT terminal found" );
2366
2367    return -1;
2368
2369} // end process_txt_alloc()
2370
2371/////////////////////////////////////////////
2372void process_txt_attach( process_t * process,
2373                         uint32_t    txt_id )
2374{
2375    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2376    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2377    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2378    xptr_t      root_xp;      // extended pointer on list root in chdev
2379    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2380
2381// check process is in owner cluster
2382assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2383"process descriptor not in owner cluster" );
2384
2385// check terminal index
2386assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2387"illegal TXT terminal index" );
2388
2389    // get pointers on TXT_RX[txt_id] chdev
2390    chdev_xp  = chdev_dir.txt_rx[txt_id];
2391    chdev_cxy = GET_CXY( chdev_xp );
2392    chdev_ptr = GET_PTR( chdev_xp );
2393
2394    // get extended pointer on root & lock of attached process list
2395    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2396    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2397
2398    // get lock protecting list of processes attached to TXT
2399    remote_busylock_acquire( lock_xp );
2400
2401    // insert process in attached process list
2402    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2403
2404    // release lock protecting list of processes attached to TXT
2405    remote_busylock_release( lock_xp );
2406
2407#if DEBUG_PROCESS_TXT
2408thread_t * this = CURRENT_THREAD;
2409uint32_t cycle = (uint32_t)hal_get_cycles();
2410if( DEBUG_PROCESS_TXT < cycle )
2411printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2412__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
2413#endif
2414
2415} // end process_txt_attach()
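
// Illustrative sketch (not part of the original source): giving a newly
// created user process a private terminal by combining process_txt_alloc()
// and process_txt_attach(). Whether this matches the actual process creation
// path is an assumption; the helper name is hypothetical.
static void example_give_terminal( process_t * process )
{
    // get a user TXT channel whose attached-process list is empty
    uint32_t txt_id = process_txt_alloc();

    // register the process in the TXT_RX[txt_id] attached-process list
    process_txt_attach( process , txt_id );
}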
2416
2417/////////////////////////////////////////////
2418void process_txt_detach( xptr_t  process_xp )
2419{
2420    process_t * process_ptr;  // local pointer on process in owner cluster
2421    cxy_t       process_cxy;  // process owner cluster
2422    pid_t       process_pid;  // process identifier
2423    xptr_t      file_xp;      // extended pointer on stdin file
2424    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2425    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2426    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2427    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2428
2429    // get process cluster, local pointer, and PID
2430    process_cxy = GET_CXY( process_xp );
2431    process_ptr = GET_PTR( process_xp );
2432    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2433
2434// check process descriptor in owner cluster
2435assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2436"process descriptor not in owner cluster" );
2437
2438    // release TXT ownership (does nothing if not TXT owner)
2439    process_txt_transfer_ownership( process_xp );
2440
2441    // get extended pointer on process stdin pseudo file
2442    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2443
2444    // get pointers on TXT_RX chdev
2445    chdev_xp  = chdev_from_file( file_xp );
2446    chdev_cxy = GET_CXY( chdev_xp );
2447    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2448
2449    // get extended pointer on lock protecting attached process list
2450    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2451
2452    // get lock protecting list of processes attached to TXT
2453    remote_busylock_acquire( lock_xp );
2454
2455    // unlink process from attached process list
2456    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2457
2458    // release lock protecting list of processes attached to TXT
2459    remote_busylock_release( lock_xp );
2460
2461#if DEBUG_PROCESS_TXT
2462thread_t * this = CURRENT_THREAD;
2463uint32_t cycle  = (uint32_t)hal_get_cycles();
2464uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2465if( DEBUG_PROCESS_TXT < cycle )
2466printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
2467__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
2468#endif
2469
2470} // end process_txt_detach()
2471
2472///////////////////////////////////////////////////
2473void process_txt_set_ownership( xptr_t process_xp )
2474{
2475    process_t * process_ptr;
2476    cxy_t       process_cxy;
2477    pid_t       process_pid;
2478    xptr_t      file_xp;
2479    xptr_t      txt_xp;     
2480    chdev_t   * txt_ptr;
2481    cxy_t       txt_cxy;
2482
2483    // get pointers on process in owner cluster
2484    process_cxy = GET_CXY( process_xp );
2485    process_ptr = GET_PTR( process_xp );
2486    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2487
2488    // check owner cluster
2489    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2490    "process descriptor not in owner cluster" );
2491
2492    // get extended pointer on stdin pseudo file
2493    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2494
2495    // get pointers on TXT chdev
2496    txt_xp  = chdev_from_file( file_xp );
2497    txt_cxy = GET_CXY( txt_xp );
2498    txt_ptr = GET_PTR( txt_xp );
2499
2500    // set owner field in TXT chdev
2501    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
2502
2503#if DEBUG_PROCESS_TXT
2504thread_t * this = CURRENT_THREAD;
2505uint32_t cycle  = (uint32_t)hal_get_cycles();
2506uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2507if( DEBUG_PROCESS_TXT < cycle )
2508printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
2509__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
2510#endif
2511
2512}  // end process_txt_set_ownership()
2513
2514////////////////////////////////////////////////////////
2515void process_txt_transfer_ownership( xptr_t process_xp )
2516{
2517    process_t * process_ptr;     // local pointer on process releasing ownership
2518    cxy_t       process_cxy;     // process cluster
2519    pid_t       process_pid;     // process identifier
2520    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2521    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
2522    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2523    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2524    uint32_t    txt_id;          // TXT_RX channel
2525    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2526    xptr_t      root_xp;         // extended pointer on root of attached process list
2527    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
2528    xptr_t      iter_xp;         // iterator for xlist
2529    xptr_t      current_xp;      // extended pointer on current process
2530    bool_t      found;
2531
2532#if DEBUG_PROCESS_TXT
2533thread_t * this  = CURRENT_THREAD;
2534uint32_t   cycle;
2535#endif
2536
2537    // get pointers on target process
2538    process_cxy = GET_CXY( process_xp );
2539    process_ptr = GET_PTR( process_xp );
2540    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2541
2542// check owner cluster
2543assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2544"process descriptor not in owner cluster" );
2545
2546    // get extended pointer on stdin pseudo file
2547    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2548
2549    // get pointers on TXT chdev
2550    txt_xp  = chdev_from_file( file_xp );
2551    txt_cxy = GET_CXY( txt_xp );
2552    txt_ptr = GET_PTR( txt_xp );
2553
2554    // get relevant infos from chdev descriptor
2555    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2556    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2557
2558    // transfer ownership only if target process is the TXT owner
2559    if( (owner_xp == process_xp) && (txt_id > 0) ) 
2560    {
2561        // get extended pointers on root and lock of attached processes list
2562        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2563        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
2564
2565        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2566        {
2567            // get lock
2568            remote_busylock_acquire( lock_xp );
2569
2570            // scan attached process list to find KSH process
2571            found = false;
2572            for( iter_xp = hal_remote_l64( root_xp ) ;
2573                 (iter_xp != root_xp) && (found == false) ;
2574                 iter_xp = hal_remote_l64( iter_xp ) )
2575            {
2576                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2577
2578                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2579                {
2580                    // set owner field in TXT chdev
2581                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2582
2583#if DEBUG_PROCESS_TXT
2584cycle = (uint32_t)hal_get_cycles();
2585if( DEBUG_PROCESS_TXT < cycle )
2586printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n",
2587__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2588#endif
2589                    found = true;
2590                }
2591            }
2592
2593            // release lock
2594            remote_busylock_release( lock_xp );
2595
2596// A KSH process must exist for each user TXT channel
2597assert( (found == true), "KSH process not found for TXT%d", txt_id );
2598
2599        }
2600        else                                           // target process is KSH
2601        {
2602            // get lock
2603            remote_busylock_acquire( lock_xp );
2604
2605            // scan attached process list to find another process
2606            found = false;
2607            for( iter_xp = hal_remote_l64( root_xp ) ;
2608                 (iter_xp != root_xp) && (found == false) ;
2609                 iter_xp = hal_remote_l64( iter_xp ) )
2610            {
2611                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2612
2613                if( current_xp != process_xp )            // current is not KSH
2614                {
2615                    // set owner field in TXT chdev
2616                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2617
2618#if DEBUG_PROCESS_TXT
2619cycle  = (uint32_t)hal_get_cycles();
2620cxy_t       current_cxy = GET_CXY( current_xp );
2621process_t * current_ptr = GET_PTR( current_xp );
2622uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
2623if( DEBUG_PROCESS_TXT < cycle )
2624printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n",
2625__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
2626#endif
2627                    found = true;
2628                }
2629            }
2630
2631            // release lock
2632            remote_busylock_release( lock_xp );
2633
2634            // no more owner for TXT if no other process found
2635            if( found == false )
2636            {
2637                // set owner field in TXT chdev
2638                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
2639
2640#if DEBUG_PROCESS_TXT
2641cycle = (uint32_t)hal_get_cycles();
2642if( DEBUG_PROCESS_TXT < cycle )
2643printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
2644__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2645#endif
2646            }
2647        }
2648    }
2649    else
2650    {
2651
2652#if DEBUG_PROCESS_TXT
2653cycle = (uint32_t)hal_get_cycles();
2654if( DEBUG_PROCESS_TXT < cycle )
2655printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2656__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
2657#endif
2658
2659    }
2660
2661}  // end process_txt_transfer_ownership()
2662
2663
2664////////////////////////////////////////////////
2665bool_t process_txt_is_owner( xptr_t process_xp )
2666{
2667    // get local pointer and cluster of process in owner cluster
2668    cxy_t       process_cxy = GET_CXY( process_xp );
2669    process_t * process_ptr = GET_PTR( process_xp );
2670
2671// check that the calling thread executes in the target process owner cluster
2672pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2673assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2674"process descriptor not in owner cluster" );
2675
2676    // get extended pointer on stdin pseudo file
2677    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2678
2679    // get pointers on TXT chdev
2680    xptr_t    txt_xp  = chdev_from_file( file_xp );
2681    cxy_t     txt_cxy = GET_CXY( txt_xp );
2682    chdev_t * txt_ptr = GET_PTR( txt_xp );
2683
2684    // get extended pointer on TXT_RX owner process
2685    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2686
2687    return (process_xp == owner_xp);
2688
2689}   // end process_txt_is_owner()
2690
2691////////////////////////////////////////////////     
2692xptr_t process_txt_get_owner( uint32_t channel )
2693{
2694    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2695    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2696    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2697
2698    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
2699
2700}  // end process_txt_get_owner()
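
// Illustrative sketch (not part of the original source): the TXT owner is the
// foreground process of its terminal, so a driver-side handler could check a
// given process against the owner of a channel before forwarding a control
// character. The helper is hypothetical; the signalling itself is not shown.
static bool_t example_is_foreground( uint32_t channel,
                                     xptr_t   process_xp )
{
    // XPTR_NULL means that no process currently owns the terminal
    xptr_t owner_xp = process_txt_get_owner( channel );

    return (owner_xp != XPTR_NULL) && (owner_xp == process_xp);
}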
2701
2702///////////////////////////////////////////
2703void process_txt_display( uint32_t txt_id )
2704{
2705    xptr_t      chdev_xp;
2706    cxy_t       chdev_cxy;
2707    chdev_t   * chdev_ptr;
2708    xptr_t      root_xp;
2709    xptr_t      lock_xp;
2710    xptr_t      current_xp;
2711    xptr_t      iter_xp;
2712    cxy_t       txt0_cxy;
2713    chdev_t   * txt0_ptr;
2714    xptr_t      txt0_xp;
2715    xptr_t      txt0_lock_xp;
2716   
2717    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2718    "illegal TXT terminal index" );
2719
2720    // get pointers on TXT0 chdev
2721    txt0_xp  = chdev_dir.txt_tx[0];
2722    txt0_cxy = GET_CXY( txt0_xp );
2723    txt0_ptr = GET_PTR( txt0_xp );
2724
2725    // get extended pointer on TXT0 lock
2726    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2727
2728    // get pointers on TXT_RX[txt_id] chdev
2729    chdev_xp  = chdev_dir.txt_rx[txt_id];
2730    chdev_cxy = GET_CXY( chdev_xp );
2731    chdev_ptr = GET_PTR( chdev_xp );
2732
2733    // get extended pointer on root & lock of attached process list
2734    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2735    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2736
2737    // get lock on attached process list
2738    remote_busylock_acquire( lock_xp );
2739
2740    // get TXT0 lock in busy waiting mode
2741    remote_busylock_acquire( txt0_lock_xp );
2742
2743    // display header
2744    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2745    txt_id , (uint32_t)hal_get_cycles() );
2746
2747    // scan attached process list
2748    XLIST_FOREACH( root_xp , iter_xp )
2749    {
2750        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2751        process_display( current_xp );
2752    }
2753
2754    // release TXT0 lock in busy waiting mode
2755    remote_busylock_release( txt0_lock_xp );
2756
2757    // release lock on attached process list
2758    remote_busylock_release( lock_xp );
2759
2760}  // end process_txt_display()