source: trunk/kernel/kern/process.c @ 635

Last change on this file since 635 was 635, checked in by alain, 15 months ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This has been done to cure a deadlock in case of concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 84.5 KB
Line 
1/*
2 * process.c - process related functions definition.
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner (2016,2017,2018,2019)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <hal_vmm.h>
32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
42#include <chdev.h>
43#include <list.h>
44#include <string.h>
45#include <scheduler.h>
46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
57#include <syscalls.h>
58#include <shared_syscalls.h>
59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
71/////////////////////////////////
72process_t * process_alloc( void )
73{
74        kmem_req_t req;
75
76    req.type  = KMEM_KCM;
77        req.order = bits_log2( sizeof(process_t) );
78        req.flags = AF_KERNEL;
79
80    return kmem_alloc( &req );
81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_KCM;
89        req.ptr  = process;
90        kmem_free( &req );
91}
92
93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
97{
98    error_t     error;
99    xptr_t      process_xp;
100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
111    xptr_t      file_xp;
112    xptr_t      chdev_xp;
113    chdev_t   * chdev_ptr;
114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
116    vmm_t     * vmm;
117
118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
121    // get pointer on process vmm
122    vmm = &process->vmm;
123
124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
126    parent_ptr = GET_PTR( parent_xp );
127
128    // get parent_pid
129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
130
131#if DEBUG_PROCESS_REFERENCE_INIT
132thread_t * this = CURRENT_THREAD;
133uint32_t cycle = (uint32_t)hal_get_cycles();
134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
137#endif
138
139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
142    process->owner_xp   = XPTR( local_cxy , process );
143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
145
146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
153
154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
168    // initialize VSL lock
169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
171    // register kernel vsegs in user process VMM as required by the architecture
172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
194 
195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
196cycle = (uint32_t)hal_get_cycles();
197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
200#endif
201
202    // initialize fd_array as empty
203    process_fd_init( process );
204
205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
207    {
208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
211
212        // attach process to TXT
213        process_txt_attach( process , txt_id ); 
214
215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
220#endif
221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
226        error = vfs_open(  process->vfs_root_xp,
227                           rx_path,
228                           process_xp,
229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
238
239assert( (stdin_id == 0) , "stdin index must be 0" );
240
241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
246#endif
247
248        // create stdout pseudo file         
249        error = vfs_open(  process->vfs_root_xp,
250                           tx_path,
251                           process_xp,
252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
261
262assert( (stdout_id == 1) , "stdout index must be 1" );
263
264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
269#endif
270
271        // create stderr pseudo file         
272        error = vfs_open(  process->vfs_root_xp,
273                           tx_path,
274                           process_xp,
275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
284
285assert( (stderr_id == 2) , "stderr index must be 2" );
286
287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
292#endif
293
294    }
295    else                                            // normal user process
296    {
297        // get extended pointer on stdin pseudo file in parent process
298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
300
301        // get extended pointer on parent process TXT chdev
302        chdev_xp = chdev_from_file( file_xp );
303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
306        chdev_ptr = GET_PTR( chdev_xp );
307 
308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
310
311        // attach child process to parent process TXT terminal
312        process_txt_attach( process , txt_id ); 
313
314        // copy all open files from parent process fd_array to this process
315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
317    }
318
319    // initialize lock protecting CWD changes
320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
322
323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
324cycle = (uint32_t)hal_get_cycles();
325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
328#endif
329
330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
335
336    // reset semaphore / mutex / barrier / condvar list roots and lock
337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
343
344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
348
349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
352
353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
355
356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
358
359    // initialize th_tbl[] array and associated threads
360    uint32_t i;
361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
368
369        hal_fence();
370
371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
372cycle = (uint32_t)hal_get_cycles();
373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
376#endif
377
378#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
379hal_vmm_display( parent_xp , false );
380hal_vmm_display( XPTR( local_cxy , process ) , false );
381#endif
382
383    return 0;
384
385}  // process_reference_init()
386
387/////////////////////////////////////////////////////
388error_t process_copy_init( process_t * local_process,
389                           xptr_t      reference_process_xp )
390{
391    error_t   error;
392    vmm_t   * vmm;
393
394    // get reference process cluster and local pointer
395    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
396    process_t * ref_ptr = GET_PTR( reference_process_xp );
397
398    // get pointer on process vmm
399    vmm = &local_process->vmm;
400
401    // initialize PID, REF_XP, PARENT_XP, and STATE
402    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
403    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
404    local_process->ref_xp     = reference_process_xp;
405    local_process->owner_xp   = reference_process_xp;
406    local_process->term_state = 0;
407
408#if DEBUG_PROCESS_COPY_INIT
409thread_t * this = CURRENT_THREAD; 
410uint32_t cycle = (uint32_t)hal_get_cycles();
411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
414#endif
415
416// check user process
417assert( (local_process->pid != 0), "LPID cannot be 0" );
418
419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
422
423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
425    if( error ) 
426    {
427        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
428        return -1;
429    }
430
431    // initialize GPT and VSL locks
432        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
433
434    // register kernel vsegs in VMM as required by the architecture
435    error = hal_vmm_kernel_update( local_process );
436    if( error ) 
437    {
438        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
439        return -1;
440    }
441
442    // create "args" and "envs" vsegs
443    // create "stacks" and "mmap" vsegs allocators
444    // initialize locks protecting GPT and VSL
445    error = vmm_user_init( local_process );
446    if( error ) 
447    {
448        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
449        return -1;
450    }
451 
452#if (DEBUG_PROCESS_COPY_INIT & 1)
453cycle = (uint32_t)hal_get_cycles();
454if( DEBUG_PROCESS_COPY_INIT < cycle )
455printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
456__FUNCTION__, parent_pid, this->trdid, pid, cycle );
457#endif
458
459    // set process file descriptors array
460        process_fd_init( local_process );
461
462    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
463    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
464    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
465    local_process->cwd_xp      = XPTR_NULL;
466
467    // reset children list root (not used in a process descriptor copy)
468    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
469    local_process->children_nr   = 0;
470    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
471                           LOCK_PROCESS_CHILDREN );
472
473    // reset children_list (not used in a process descriptor copy)
474    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
475
476    // reset semaphores list root (not used in a process descriptor copy)
477    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
478    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
479    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
480    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
481
482    // initialize th_tbl[] array and associated fields
483    uint32_t i;
484    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
485        {
486        local_process->th_tbl[i] = NULL;
487    }
488    local_process->th_nr  = 0;
489    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
490
491    // register new process descriptor in local cluster manager local_list
492    cluster_process_local_link( local_process );
493
494    // register new process descriptor in owner cluster manager copies_list
495    cluster_process_copies_link( local_process );
496
497        hal_fence();
498
499#if DEBUG_PROCESS_COPY_INIT
500cycle = (uint32_t)hal_get_cycles();
501if( DEBUG_PROCESS_COPY_INIT < cycle )
502printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
503__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
504#endif
505
506    return 0;
507
508} // end process_copy_init()
509
///////////////////////////////////////////
// Destroys a process descriptor copy: releases the VMM, unlinks the
// descriptor from the cluster manager lists, and - only in the owner
// cluster - detaches the process from its TXT terminal, removes it from
// the parent's children list, and releases the PID. Finally releases the
// descriptor memory. The process must not contain any thread (asserted).
// @ process : [in] local pointer on the process descriptor to destroy.
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;
    xptr_t      children_nr_xp;

    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        // (the parent may be in a remote cluster, hence the remote lock)
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
        hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
623
624///////////////////////////////////////////////////////////////////
625const char * process_action_str( process_sigactions_t action_type )
626{
627    switch ( action_type )
628    {
629        case BLOCK_ALL_THREADS:   return "BLOCK";
630        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
631        case DELETE_ALL_THREADS:  return "DELETE";
632        default:                  return "undefined";
633    }
634}
635
////////////////////////////////////////
// Applies a sigaction (block / unblock / delete all threads) to all copies
// of a given process. The calling (client) thread sends one parallel,
// non-blocking RPC per remote cluster containing a copy, waits for all
// responses, then handles the local copy (if any) directly.
// The RPC descriptor is allocated in the client stack and shared by all
// servers, which is safe because there are no out arguments.
// @ pid  : [in] target process identifier.
// @ type : [in] action type (BLOCK / UNBLOCK / DELETE _ALL_THREADS).
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );

    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    // (must be done BEFORE sending RPCs, so a fast server cannot
    //  unblock the client before it is actually blocked)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        {
            // the local copy is handled after all RPCs are sent
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            // (decremented by each server; last server unblocks the client)
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    }
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
789
/////////////////////////////////////////////////
// Blocks all threads of a user process in the local cluster, by setting
// the THREAD_BLOCKED_GLOBAL bit in each thread descriptor. For threads
// running on another core, it requests an acknowledge from the target
// scheduler (via an IPI) and busy-waits until all acknowledges are
// received, to guarantee that no target thread is still running.
// @ process : [in] local pointer on the target process descriptor.
void process_block_threads( process_t * process )
{
    thread_t          * target;         // pointer on target thread
    thread_t          * this;           // pointer on calling thread
    uint32_t            ltid;           // index in process th_tbl[]
    uint32_t            count;          // requests counter
    volatile uint32_t   ack_count;      // acknowledges counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
assert( (LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowlege the blocking
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from scheduler.

    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exist
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );

            if( this->core->lid != target->core->lid )
            {
                // increment responses counter
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                // (the target scheduler decrements ack_count when it has
                //  descheduled the target thread)
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait other threads acknowledges  TODO this could be improved...
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;

        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()
871
872/////////////////////////////////////////////////
873void process_delete_threads( process_t * process,
874                             xptr_t      client_xp )
875{
876    thread_t          * target;        // local pointer on target thread
877    xptr_t              target_xp;     // extended pointer on target thread
878    cxy_t               owner_cxy;     // owner process cluster
879    uint32_t            ltid;          // index in process th_tbl
880    uint32_t            count;         // threads counter
881
882    // get calling thread pointer
883
884    // get target process owner cluster
885    owner_cxy = CXY_FROM_PID( process->pid );
886
887#if DEBUG_PROCESS_SIGACTION
888thread_t * this  = CURRENT_THREAD;
889uint32_t   cycle = (uint32_t)hal_get_cycles();
890if( DEBUG_PROCESS_SIGACTION < cycle )
891printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
892__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
893#endif
894
895// check target process is an user process
896assert( (LPID_FROM_PID( process->pid ) != 0),
897"process %x is not an user process\n", process->pid );
898
899    // get lock protecting process th_tbl[]
900    rwlock_wr_acquire( &process->th_lock );
901
902    // loop on target process local threads                       
903    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
904    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
905    {
906        target = process->th_tbl[ltid];
907
908        if( target != NULL )    // valid thread 
909        {
910            count++;
911            target_xp = XPTR( local_cxy , target );
912
913            // main thread and client thread should not be deleted
914            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
915                (client_xp) != target_xp )                           // not client thread
916            {
917                // mark target thread for delete and block it
918                thread_delete( target_xp , process->pid , false );   // not forced
919            }
920        }
921    }
922
923    // release lock protecting process th_tbl[]
924    rwlock_wr_release( &process->th_lock );
925
926#if DEBUG_PROCESS_SIGACTION
927cycle = (uint32_t)hal_get_cycles();
928if( DEBUG_PROCESS_SIGACTION < cycle )
929printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
930__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
931#endif
932
933}  // end process_delete_threads()
934
935///////////////////////////////////////////////////
936void process_unblock_threads( process_t * process )
937{
938    thread_t          * target;        // pointer on target thead
939    uint32_t            ltid;          // index in process th_tbl
940    uint32_t            count;         // requests counter
941
942#if DEBUG_PROCESS_SIGACTION
943thread_t * this  = CURRENT_THREAD;
944pid_t      pid   = process->pid;
945uint32_t   cycle = (uint32_t)hal_get_cycles();
946if( DEBUG_PROCESS_SIGACTION < cycle )
947printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
948__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
949#endif
950
951// check target process is an user process
952assert( ( LPID_FROM_PID( process->pid ) != 0 ),
953"process %x is not an user process\n", process->pid );
954
955    // get lock protecting process th_tbl[]
956    rwlock_rd_acquire( &process->th_lock );
957
958    // loop on process threads to unblock all threads
959    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
960    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
961    {
962        target = process->th_tbl[ltid];
963
964        if( target != NULL )             // thread found
965        {
966            count++;
967
968            // reset the global blocked bit in target thread descriptor.
969            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
970        }
971    }
972
973    // release lock protecting process th_tbl[]
974    rwlock_rd_release( &process->th_lock );
975
976#if DEBUG_PROCESS_SIGACTION
977cycle = (uint32_t)hal_get_cycles();
978if( DEBUG_PROCESS_SIGACTION < cycle )
979printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
980__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
981#endif
982
983}  // end process_unblock_threads()
984
985///////////////////////////////////////////////
986process_t * process_get_local_copy( pid_t pid )
987{
988    error_t        error;
989    process_t    * process_ptr;   // local pointer on process
990    xptr_t         process_xp;    // extended pointer on process
991
992    cluster_t * cluster = LOCAL_CLUSTER;
993
994#if DEBUG_PROCESS_GET_LOCAL_COPY
995thread_t * this = CURRENT_THREAD;
996uint32_t cycle = (uint32_t)hal_get_cycles();
997if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
998printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
999__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
1000#endif
1001
1002    // get lock protecting local list of processes
1003    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1004
1005    // scan the local list of process descriptors to find the process
1006    xptr_t  iter;
1007    bool_t  found = false;
1008    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
1009    {
1010        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1011        process_ptr = GET_PTR( process_xp );
1012        if( process_ptr->pid == pid )
1013        {
1014            found = true;
1015            break;
1016        }
1017    }
1018
1019    // release lock protecting local list of processes
1020    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1021
1022    // allocate memory for a new local process descriptor
1023    // and initialise it from reference cluster if not found
1024    if( !found )
1025    {
1026        // get extended pointer on reference process descriptor
1027        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
1028
1029        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
1030
1031        // allocate memory for local process descriptor
1032        process_ptr = process_alloc();
1033
1034        if( process_ptr == NULL )  return NULL;
1035
1036        // initialize local process descriptor copy
1037        error = process_copy_init( process_ptr , ref_xp );
1038
1039        if( error ) return NULL;
1040    }
1041
1042#if DEBUG_PROCESS_GET_LOCAL_COPY
1043cycle = (uint32_t)hal_get_cycles();
1044if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1045printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
1046__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
1047#endif
1048
1049    return process_ptr;
1050
1051}  // end process_get_local_copy()
1052
1053////////////////////////////////////////////
1054pid_t process_get_ppid( xptr_t  process_xp )
1055{
1056    cxy_t       process_cxy;
1057    process_t * process_ptr;
1058    xptr_t      parent_xp;
1059    cxy_t       parent_cxy;
1060    process_t * parent_ptr;
1061
1062    // get process cluster and local pointer
1063    process_cxy = GET_CXY( process_xp );
1064    process_ptr = GET_PTR( process_xp );
1065
1066    // get pointers on parent process
1067    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1068    parent_cxy = GET_CXY( parent_xp );
1069    parent_ptr = GET_PTR( parent_xp );
1070
1071    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1072}
1073
1074//////////////////////////////////////////////////////////////////////////////////////////
1075// File descriptor array related functions
1076//////////////////////////////////////////////////////////////////////////////////////////
1077
1078///////////////////////////////////////////
1079void process_fd_init( process_t * process )
1080{
1081    uint32_t fd;
1082
1083    // initialize lock
1084    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
1085
1086    // initialize number of open files
1087    process->fd_array.current = 0;
1088
1089    // initialize array
1090    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1091    {
1092        process->fd_array.array[fd] = XPTR_NULL;
1093    }
1094}
1095
1096////////////////////////////////////////////////////
1097error_t process_fd_register( xptr_t      process_xp,
1098                             xptr_t      file_xp,
1099                             uint32_t  * fdid )
1100{
1101    bool_t    found;
1102    uint32_t  id;
1103    xptr_t    xp;
1104
1105    // get reference process cluster and local pointer
1106    process_t * process_ptr = GET_PTR( process_xp );
1107    cxy_t       process_cxy = GET_CXY( process_xp );
1108
1109// check client process is reference process
1110assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
1111"client process must be reference process\n" );
1112
1113#if DEBUG_PROCESS_FD_REGISTER
1114thread_t * this  = CURRENT_THREAD;
1115uint32_t   cycle = (uint32_t)hal_get_cycles();
1116pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1117if( DEBUG_PROCESS_FD_REGISTER < cycle )
1118printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1119__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1120#endif
1121
1122    // build extended pointer on lock protecting reference fd_array
1123    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1124
1125    // take lock protecting reference fd_array
1126        remote_queuelock_acquire( lock_xp );
1127
1128    found   = false;
1129
1130    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
1131    {
1132        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1133        if ( xp == XPTR_NULL )
1134        {
1135            // update reference fd_array
1136            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1137                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
1138
1139            // exit
1140                        *fdid = id;
1141            found = true;
1142            break;
1143        }
1144    }
1145
1146    // release lock protecting fd_array
1147        remote_queuelock_release( lock_xp );
1148
1149#if DEBUG_PROCESS_FD_REGISTER
1150cycle = (uint32_t)hal_get_cycles();
1151if( DEBUG_PROCESS_FD_REGISTER < cycle )
1152printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1153__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1154#endif
1155
1156    if ( !found ) return -1;
1157    else          return 0;
1158
1159}  // end process_fd_register()
1160
1161////////////////////////////////////////////////
1162xptr_t process_fd_get_xptr( process_t * process,
1163                            uint32_t    fdid )
1164{
1165    xptr_t  file_xp;
1166    xptr_t  lock_xp;
1167
1168    // access local copy of process descriptor
1169    file_xp = process->fd_array.array[fdid];
1170
1171    if( file_xp == XPTR_NULL )
1172    {
1173        // get reference process cluster and local pointer
1174        xptr_t      ref_xp  = process->ref_xp;
1175        cxy_t       ref_cxy = GET_CXY( ref_xp );
1176        process_t * ref_ptr = GET_PTR( ref_xp );
1177
1178        // build extended pointer on lock protecting reference fd_array
1179        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1180
1181        // take lock protecting reference fd_array
1182            remote_queuelock_acquire( lock_xp );
1183
1184        // access reference process descriptor
1185        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
1186
1187        // update local fd_array if found
1188        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1189       
1190        // release lock protecting reference fd_array
1191            remote_queuelock_release( lock_xp );
1192    }
1193
1194    return file_xp;
1195
1196}  // end process_fd_get_xptr()
1197
1198///////////////////////////////////////////
1199void process_fd_remote_copy( xptr_t dst_xp,
1200                             xptr_t src_xp )
1201{
1202    uint32_t fd;
1203    xptr_t   entry;
1204
1205    // get cluster and local pointer for src fd_array
1206    cxy_t        src_cxy = GET_CXY( src_xp );
1207    fd_array_t * src_ptr = GET_PTR( src_xp );
1208
1209    // get cluster and local pointer for dst fd_array
1210    cxy_t        dst_cxy = GET_CXY( dst_xp );
1211    fd_array_t * dst_ptr = GET_PTR( dst_xp );
1212
1213    // get the remote lock protecting the src fd_array
1214        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
1215
1216    // loop on all fd_array entries
1217    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1218        {
1219                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
1220
1221                if( entry != XPTR_NULL )
1222                {
1223            // increment file descriptor refcount
1224            vfs_file_count_up( entry );
1225
1226                        // copy entry in destination process fd_array
1227                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
1228                }
1229        }
1230
1231    // release lock on source process fd_array
1232        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
1233
1234}  // end process_fd_remote_copy()
1235
1236
1237////////////////////////////////////
1238bool_t process_fd_array_full( void )
1239{
1240    // get extended pointer on reference process
1241    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1242
1243    // get reference process cluster and local pointer
1244    process_t * ref_ptr = GET_PTR( ref_xp );
1245    cxy_t       ref_cxy = GET_CXY( ref_xp );
1246
1247    // get number of open file descriptors from reference fd_array
1248    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1249
1250        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1251}
1252
1253
1254////////////////////////////////////////////////////////////////////////////////////
1255//  Thread related functions
1256////////////////////////////////////////////////////////////////////////////////////
1257
1258/////////////////////////////////////////////////////
1259error_t process_register_thread( process_t * process,
1260                                 thread_t  * thread,
1261                                 trdid_t   * trdid )
1262{
1263    ltid_t         ltid;
1264    bool_t         found = false;
1265 
1266// check arguments
1267assert( (process != NULL) , "process argument is NULL" );
1268assert( (thread != NULL) , "thread argument is NULL" );
1269
1270    // get the lock protecting th_tbl for all threads
1271    // but the idle thread executing kernel_init (cannot yield)
1272    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1273
1274    // scan th_tbl
1275    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1276    {
1277        if( process->th_tbl[ltid] == NULL )
1278        {
1279            found = true;
1280            break;
1281        }
1282    }
1283
1284    if( found )
1285    {
1286        // register thread in th_tbl[]
1287        process->th_tbl[ltid] = thread;
1288        process->th_nr++;
1289
1290        // returns trdid
1291        *trdid = TRDID( local_cxy , ltid );
1292    }
1293
1294    // release the lock protecting th_tbl
1295    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1296
1297    return (found) ? 0 : 0xFFFFFFFF;
1298
1299}  // end process_register_thread()
1300
1301///////////////////////////////////////////////////
1302uint32_t process_remove_thread( thread_t * thread )
1303{
1304    uint32_t count;  // number of threads in local process descriptor
1305
1306// check thread
1307assert( (thread != NULL) , "thread argument is NULL" );
1308
1309    process_t * process = thread->process;
1310
1311    // get thread local index
1312    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1313   
1314    // get the lock protecting th_tbl[]
1315    rwlock_wr_acquire( &process->th_lock );
1316
1317    // get number of threads
1318    count = process->th_nr;
1319
1320// check th_nr value
1321assert( (count > 0) , "process th_nr cannot be 0" );
1322
1323    // remove thread from th_tbl[]
1324    process->th_tbl[ltid] = NULL;
1325    process->th_nr = count-1;
1326
1327    // release lock protecting th_tbl
1328    rwlock_wr_release( &process->th_lock );
1329
1330    return count;
1331
1332}  // end process_remove_thread()
1333
1334/////////////////////////////////////////////////////////
1335error_t process_make_fork( xptr_t      parent_process_xp,
1336                           xptr_t      parent_thread_xp,
1337                           pid_t     * child_pid,
1338                           thread_t ** child_thread )
1339{
1340    process_t * process;         // local pointer on child process descriptor
1341    thread_t  * thread;          // local pointer on child thread descriptor
1342    pid_t       new_pid;         // process identifier for child process
1343    pid_t       parent_pid;      // process identifier for parent process
1344    xptr_t      ref_xp;          // extended pointer on reference process
1345    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
1346    error_t     error;
1347
1348    // get cluster and local pointer for parent process
1349    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
1350    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
1351
1352    // get parent process PID and extended pointer on .elf file
1353    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1354    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1355
1356    // get extended pointer on reference process
1357    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
1358
1359// check parent process is the reference process
1360assert( (parent_process_xp == ref_xp ) ,
1361"parent process must be the reference process" );
1362
1363#if DEBUG_PROCESS_MAKE_FORK
1364uint32_t   cycle;
1365thread_t * this  = CURRENT_THREAD;
1366trdid_t    trdid = this->trdid;
1367pid_t      pid   = this->process->pid;
1368#endif
1369
1370#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1371cycle   = (uint32_t)hal_get_cycles();
1372if( DEBUG_PROCESS_MAKE_FORK < cycle )
1373printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
1374__FUNCTION__, pid, trdid, local_cxy, cycle );
1375#endif
1376
1377    // allocate a process descriptor
1378    process = process_alloc();
1379
1380    if( process == NULL )
1381    {
1382        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1383        __FUNCTION__, local_cxy ); 
1384        return -1;
1385    }
1386
1387    // allocate a child PID from local cluster
1388    error = cluster_pid_alloc( process , &new_pid );
1389    if( error ) 
1390    {
1391        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1392        __FUNCTION__, local_cxy ); 
1393        process_free( process );
1394        return -1;
1395    }
1396
1397#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1398cycle = (uint32_t)hal_get_cycles();
1399if( DEBUG_PROCESS_MAKE_FORK < cycle )
1400printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
1401__FUNCTION__, pid, trdid, new_pid, cycle );
1402#endif
1403
1404    // initializes child process descriptor from parent process descriptor
1405    error = process_reference_init( process,
1406                                    new_pid,
1407                                    parent_process_xp );
1408    if( error ) 
1409    {
1410        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1411        __FUNCTION__, local_cxy ); 
1412        process_free( process );
1413        return -1;
1414    }
1415
1416#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1417cycle = (uint32_t)hal_get_cycles();
1418if( DEBUG_PROCESS_MAKE_FORK < cycle )
1419printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
1420__FUNCTION__, pid, trdid, new_pid, cycle );
1421#endif
1422
1423    // copy VMM from parent descriptor to child descriptor
1424    error = vmm_fork_copy( process,
1425                           parent_process_xp );
1426    if( error )
1427    {
1428        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1429        __FUNCTION__, local_cxy ); 
1430        process_free( process );
1431        cluster_pid_release( new_pid );
1432        return -1;
1433    }
1434
1435#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1436cycle = (uint32_t)hal_get_cycles();
1437if( DEBUG_PROCESS_MAKE_FORK < cycle )
1438printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
1439__FUNCTION__, pid, trdid, cycle );
1440hal_vmm_display( XPTR( local_cxy , process ) , true );
1441#endif
1442
1443    // if parent_process is INIT, or if parent_process is the TXT owner,
1444    // the child_process becomes the owner of its TXT terminal
1445    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
1446    {
1447        process_txt_set_ownership( XPTR( local_cxy , process ) );
1448
1449#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1450cycle = (uint32_t)hal_get_cycles();
1451if( DEBUG_PROCESS_MAKE_FORK < cycle )
1452printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
1453__FUNCTION__ , pid, trdid, new_pid, cycle );
1454#endif
1455
1456    }
1457
1458    // update extended pointer on .elf file
1459    process->vfs_bin_xp = vfs_bin_xp;
1460
1461    // create child thread descriptor from parent thread descriptor
1462    error = thread_user_fork( parent_thread_xp,
1463                              process,
1464                              &thread );
1465    if( error )
1466    {
1467        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1468        __FUNCTION__, local_cxy ); 
1469        process_free( process );
1470        cluster_pid_release( new_pid );
1471        return -1;
1472    }
1473
1474// check main thread LTID
1475assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1476"main thread must have LTID == 0" );
1477
1478#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1479cycle = (uint32_t)hal_get_cycles();
1480if( DEBUG_PROCESS_MAKE_FORK < cycle )
1481printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
1482__FUNCTION__, pid, trdid, thread, cycle );
1483#endif
1484
1485    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
1486    // this includes all parent process copies in all clusters
1487    if( parent_process_cxy == local_cxy )   // reference is local
1488    {
1489        vmm_set_cow( parent_process_ptr );
1490    }
1491    else                                    // reference is remote
1492    {
1493        rpc_vmm_set_cow_client( parent_process_cxy,
1494                                parent_process_ptr );
1495    }
1496
1497    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
1498    vmm_set_cow( process );
1499 
1500#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1501cycle = (uint32_t)hal_get_cycles();
1502if( DEBUG_PROCESS_MAKE_FORK < cycle )
1503printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
1504__FUNCTION__, pid, trdid, cycle );
1505#endif
1506
1507    // get extended pointers on parent children_root, children_lock and children_nr
1508    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1509    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1510    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
1511
1512    // register process in parent children list
1513    remote_queuelock_acquire( children_lock_xp );
1514        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1515        hal_remote_atomic_add( children_nr_xp , 1 );
1516    remote_queuelock_release( children_lock_xp );
1517
1518    // return success
1519    *child_thread = thread;
1520    *child_pid    = new_pid;
1521
1522#if DEBUG_PROCESS_MAKE_FORK
1523cycle = (uint32_t)hal_get_cycles();
1524if( DEBUG_PROCESS_MAKE_FORK < cycle )
1525printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
1526__FUNCTION__, pid, trdid, new_pid, cycle );
1527#endif
1528
1529    return 0;
1530
1531}   // end process_make_fork()
1532
1533/////////////////////////////////////////////////////
1534error_t process_make_exec( exec_info_t  * exec_info )
1535{
1536    thread_t       * thread;                  // local pointer on this thread
1537    process_t      * process;                 // local pointer on this process
1538    pid_t            pid;                     // this process identifier
1539    xptr_t           ref_xp;                  // reference process for this process
1540        error_t          error;                   // value returned by called functions
1541    char           * path;                    // path to .elf file
1542    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1543    uint32_t         file_id;                 // file index in fd_array
1544    uint32_t         args_nr;                 // number of main thread arguments
1545    char          ** args_pointers;           // array of pointers on main thread arguments
1546
1547    // get calling thread, process, pid and ref_xp
1548    thread  = CURRENT_THREAD;
1549    process = thread->process;
1550    pid     = process->pid;
1551    ref_xp  = process->ref_xp;
1552
1553        // get relevant infos from exec_info
1554        path          = exec_info->path;
1555    args_nr       = exec_info->args_nr;
1556    args_pointers = exec_info->args_pointers;
1557
1558#if DEBUG_PROCESS_MAKE_EXEC
1559uint32_t cycle = (uint32_t)hal_get_cycles();
1560if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1561printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
1562__FUNCTION__, pid, thread->trdid, path, cycle );
1563#endif
1564
1565    // open the file identified by <path>
1566    file_xp = XPTR_NULL;
1567    file_id = 0xFFFFFFFF;
1568        error   = vfs_open( process->vfs_root_xp,
1569                            path,
1570                        ref_xp,
1571                            O_RDONLY,
1572                            0,
1573                            &file_xp,
1574                            &file_id );
1575        if( error )
1576        {
1577                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1578                return -1;
1579        }
1580
1581#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1582cycle = (uint32_t)hal_get_cycles();
1583if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1584printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
1585__FUNCTION__, pid, thread->trdid, path, cycle );
1586#endif
1587
1588    // delete all threads other than this main thread in all clusters
1589    process_sigaction( pid , DELETE_ALL_THREADS );
1590
1591#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1592cycle = (uint32_t)hal_get_cycles();
1593if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1594printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
1595__FUNCTION__, pid, thread->trdid, cycle );
1596#endif
1597
1598    // reset calling process VMM
1599    vmm_user_reset( process );
1600
1601#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1602cycle = (uint32_t)hal_get_cycles();
1603if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1604printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
1605__FUNCTION__, pid, thread->trdid, cycle );
1606#endif
1607
1608    // re-initialize the VMM (args/envs vsegs registration)
1609    error = vmm_user_init( process );
1610    if( error )
1611    {
1612        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1613        vfs_close( file_xp , file_id );
1614        // FIXME restore old process VMM [AG]
1615        return -1;
1616    }
1617   
1618#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1619cycle = (uint32_t)hal_get_cycles();
1620if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1621printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
1622__FUNCTION__, pid, thread->trdid, cycle );
1623#endif
1624
1625    // register code & data vsegs as well as entry-point in process VMM,
1626    // and register extended pointer on .elf file in process descriptor
1627        error = elf_load_process( file_xp , process );
1628    if( error )
1629        {
1630                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
1631        vfs_close( file_xp , file_id );
1632        // FIXME restore old process VMM [AG]
1633        return -1;
1634        }
1635
1636#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1637cycle = (uint32_t)hal_get_cycles();
1638if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1639printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
1640__FUNCTION__, pid, thread->trdid, cycle );
1641#endif
1642
1643    // update the existing main thread descriptor... and jump to user code
1644    error = thread_user_exec( (void *)process->vmm.entry_point,
1645                              args_nr,
1646                              args_pointers );
1647    if( error )
1648    {
1649        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
1650        vfs_close( file_xp , file_id );
1651        // FIXME restore old process VMM
1652        return -1;
1653    }
1654
1655    assert( false, "we should not execute this code");
1656 
1657        return 0;
1658
1659}  // end process_make_exec()
1660
1661
1662////////////////////////////////////////////////
1663void process_zero_create( process_t   * process,
1664                          boot_info_t * info )
1665{
1666    error_t error;
1667    pid_t   pid;
1668
1669#if DEBUG_PROCESS_ZERO_CREATE
1670uint32_t cycle = (uint32_t)hal_get_cycles();
1671if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1672printk("\n[%s] enter / cluster %x / cycle %d\n",
1673__FUNCTION__, local_cxy, cycle );
1674#endif
1675
1676    // get pointer on VMM
1677    vmm_t * vmm = &process->vmm;
1678
1679    // get PID from local cluster manager for this kernel process
1680    error = cluster_pid_alloc( process , &pid );
1681
1682    if( error || (LPID_FROM_PID( pid ) != 0) )
1683    {
1684        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1685        __FUNCTION__ , local_cxy, pid );
1686        hal_core_sleep();
1687    }
1688
1689#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1690if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1691printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
1692#endif
1693
1694    // initialize PID, REF_XP, PARENT_XP, and STATE
1695    // the kernel process_zero is its own parent_process,
1696    // reference_process, and owner_process, and cannot be killed...
1697    process->pid        = pid;
1698    process->ref_xp     = XPTR( local_cxy , process );
1699    process->owner_xp   = XPTR( local_cxy , process );
1700    process->parent_xp  = XPTR( local_cxy , process );
1701    process->term_state = 0;
1702
1703    // initialize VSL as empty
1704    vmm->vsegs_nr = 0;
1705        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
1706
1707#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1708if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1709printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
1710#endif
1711
1712    // initialize GPT as empty
1713    error = hal_gpt_create( &vmm->gpt );
1714
1715    if( error ) 
1716    {
1717        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1718        hal_core_sleep();
1719    }
1720
1721#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1722if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1723printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
1724#endif
1725
1726    // initialize VSL and GPT locks
1727    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
1728   
1729    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
1730    error = hal_vmm_kernel_init( info );
1731
1732    if( error ) 
1733    {
1734        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
1735        hal_core_sleep();
1736    }
1737
1738#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1739if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1740printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
1741#endif
1742
1743    // reset th_tbl[] array and associated fields
1744    uint32_t i;
1745    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
1746        {
1747        process->th_tbl[i] = NULL;
1748    }
1749    process->th_nr  = 0;
1750    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
1751
1752#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1753if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1754printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy );
1755#endif
1756
1757    // reset children list as empty
1758    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1759    process->children_nr = 0;
1760    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1761                           LOCK_PROCESS_CHILDREN );
1762
1763#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1764if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1765printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy );
1766#endif
1767
1768    // register kernel process in cluster manager local_list
1769    cluster_process_local_link( process );
1770   
1771        hal_fence();
1772
1773#if DEBUG_PROCESS_ZERO_CREATE
1774cycle = (uint32_t)hal_get_cycles();
1775if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1776printk("\n[%s] exit / cluster %x / cycle %d\n",
1777__FUNCTION__, local_cxy, cycle );
1778#endif
1779
1780}  // end process_zero_create()
1781
1782////////////////////////////////
1783void process_init_create( void )
1784{
1785    process_t      * process;       // local pointer on process descriptor
1786    pid_t            pid;           // process_init identifier
1787    thread_t       * thread;        // local pointer on main thread
1788    pthread_attr_t   attr;          // main thread attributes
1789    lid_t            lid;           // selected core local index for main thread
1790    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1791    uint32_t         file_id;       // file index in fd_array
1792    error_t          error;
1793
1794#if DEBUG_PROCESS_INIT_CREATE
1795thread_t * this = CURRENT_THREAD;
1796uint32_t cycle = (uint32_t)hal_get_cycles();
1797if( DEBUG_PROCESS_INIT_CREATE < cycle )
1798printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1799__FUNCTION__, this->process->pid, this->trdid, cycle );
1800#endif
1801
1802    // allocates memory for process descriptor from local cluster
1803        process = process_alloc(); 
1804
1805
1806    if( process == NULL )
1807    {
1808        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
1809        hal_core_sleep();
1810    }
1811
1812    // set the CWD and VFS_ROOT fields in process descriptor
1813    process->cwd_xp      = process_zero.vfs_root_xp;
1814    process->vfs_root_xp = process_zero.vfs_root_xp;
1815
1816    // get PID from local cluster
1817    error = cluster_pid_alloc( process , &pid );
1818    if( error ) 
1819    {
1820        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
1821        hal_core_sleep();
1822    }
1823    if( pid != 1 ) 
1824    {
1825        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
1826        hal_core_sleep();
1827    }
1828
1829    // initialize process descriptor / parent is local process_zero
1830    error = process_reference_init( process,
1831                                    pid,
1832                                    XPTR( local_cxy , &process_zero ) ); 
1833    if( error )
1834    {
1835        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
1836        hal_core_sleep();
1837    }
1838
1839#if(DEBUG_PROCESS_INIT_CREATE & 1)
1840if( DEBUG_PROCESS_INIT_CREATE < cycle )
1841printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1842__FUNCTION__, this->process->pid, this->trdid );
1843#endif
1844
1845    // open the file identified by CONFIG_PROCESS_INIT_PATH
1846    file_xp = XPTR_NULL;
1847    file_id = -1;
1848        error   = vfs_open( process->vfs_root_xp,
1849                            CONFIG_PROCESS_INIT_PATH,
1850                        XPTR( local_cxy , process ),
1851                            O_RDONLY,
1852                            0,
1853                            &file_xp,
1854                            &file_id );
1855    if( error )
1856    {
1857        printk("\n[PANIC] in %s : cannot open file <%s>\n",
1858         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1859        hal_core_sleep();
1860    }
1861
1862#if(DEBUG_PROCESS_INIT_CREATE & 1)
1863if( DEBUG_PROCESS_INIT_CREATE < cycle )
1864printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
1865__FUNCTION__, this->process->pid, this->trdid );
1866#endif
1867
1868    // register "code" and "data" vsegs as well as entry-point
1869    // in process VMM, using information contained in the elf file.
1870        error = elf_load_process( file_xp , process );
1871
1872    if( error ) 
1873    {
1874        printk("\n[PANIC] in %s : cannot access file <%s>\n",
1875         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1876        hal_core_sleep();
1877    }
1878
1879
1880#if(DEBUG_PROCESS_INIT_CREATE & 1)
1881if( DEBUG_PROCESS_INIT_CREATE < cycle )
1882printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1883__FUNCTION__, this->process->pid, this->trdid );
1884#endif
1885
1886#if (DEBUG_PROCESS_INIT_CREATE & 1)
1887hal_vmm_display( XPTR( local_cxy , process ) , true );
1888#endif
1889
1890    // get extended pointers on process_zero children_root, children_lock
1891    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1892    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1893
1894    // take lock protecting kernel process children list
1895    remote_queuelock_acquire( children_lock_xp );
1896
1897    // register process INIT in parent local process_zero
1898        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1899        hal_atomic_add( &process_zero.children_nr , 1 );
1900
1901    // release lock protecting kernel process children list
1902    remote_queuelock_release( children_lock_xp );
1903
1904#if(DEBUG_PROCESS_INIT_CREATE & 1)
1905if( DEBUG_PROCESS_INIT_CREATE < cycle )
1906printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1907__FUNCTION__, this->process->pid, this->trdid );
1908#endif
1909
1910    // select a core in local cluster to execute the main thread
1911    lid  = cluster_select_local_core();
1912
1913    // initialize pthread attributes for main thread
1914    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1915    attr.cxy        = local_cxy;
1916    attr.lid        = lid;
1917
1918    // create and initialize thread descriptor
1919        error = thread_user_create( pid,
1920                                (void *)process->vmm.entry_point,
1921                                NULL,
1922                                &attr,
1923                                &thread );
1924
1925    if( error )
1926    {
1927        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
1928        hal_core_sleep();
1929    }
1930    if( thread->trdid != 0 )
1931    {
1932        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
1933        hal_core_sleep();
1934    }
1935
1936#if(DEBUG_PROCESS_INIT_CREATE & 1)
1937if( DEBUG_PROCESS_INIT_CREATE < cycle )
1938printk("\n[%s] thread[%x,%x] created main thread\n",
1939__FUNCTION__, this->process->pid, this->trdid );
1940#endif
1941
1942    // activate thread
1943        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1944
1945    hal_fence();
1946
1947#if DEBUG_PROCESS_INIT_CREATE
1948cycle = (uint32_t)hal_get_cycles();
1949if( DEBUG_PROCESS_INIT_CREATE < cycle )
1950printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1951__FUNCTION__, this->process->pid, this->trdid, cycle );
1952#endif
1953
1954}  // end process_init_create()
1955
1956/////////////////////////////////////////
1957void process_display( xptr_t process_xp )
1958{
1959    process_t   * process_ptr;
1960    cxy_t         process_cxy;
1961
1962    xptr_t        parent_xp;       // extended pointer on parent process
1963    process_t   * parent_ptr;
1964    cxy_t         parent_cxy;
1965
1966    xptr_t        owner_xp;        // extended pointer on owner process
1967    process_t   * owner_ptr;
1968    cxy_t         owner_cxy;
1969
1970    pid_t         pid;
1971    pid_t         ppid;
1972    lpid_t        lpid;
1973    uint32_t      state;
1974    uint32_t      th_nr;
1975
1976    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1977    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1978    chdev_t     * txt_chdev_ptr;
1979    cxy_t         txt_chdev_cxy;
1980    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
1981
1982    xptr_t        elf_file_xp;     // extended pointer on .elf file
1983    cxy_t         elf_file_cxy;
1984    vfs_file_t  * elf_file_ptr;
1985    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1986
1987    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1988    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1989
1990    // get cluster and local pointer on process
1991    process_ptr = GET_PTR( process_xp );
1992    process_cxy = GET_CXY( process_xp );
1993
1994    // get process PID, LPID, and state
1995    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
1996    lpid  = LPID_FROM_PID( pid );
1997    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
1998
1999    // get process PPID
2000    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
2001    parent_cxy = GET_CXY( parent_xp );
2002    parent_ptr = GET_PTR( parent_xp );
2003    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
2004
2005    // get number of threads
2006    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
2007
2008    // get pointers on owner process descriptor
2009    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
2010    owner_cxy = GET_CXY( owner_xp );
2011    owner_ptr = GET_PTR( owner_xp );
2012
2013    // get process TXT name and .elf name
2014    if( lpid )                                   // user process
2015    {
2016
2017        // get extended pointer on file descriptor associated to TXT_RX
2018        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
2019
2020        assert( (txt_file_xp != XPTR_NULL) ,
2021        "process must be attached to one TXT terminal" ); 
2022
2023        // get TXT_RX chdev pointers
2024        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2025        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2026        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2027
2028        // get TXT_RX name and ownership
2029        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2030                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
2031   
2032        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2033                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
2034
2035        // get process .elf name
2036        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2037        elf_file_cxy  = GET_CXY( elf_file_xp );
2038        elf_file_ptr  = GET_PTR( elf_file_xp );
2039        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2040        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2041    }
2042    else                                         // kernel process_zero
2043    {
2044        // TXT name and .elf name are not registered in kernel process_zero
2045        strcpy( txt_name , "txt0_rx" );
2046        txt_owner_xp = process_xp; 
2047        strcpy( elf_name , "kernel.elf" );
2048    }
2049
2050    // display process info
2051    if( txt_owner_xp == process_xp )
2052    {
2053        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2054        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2055    }
2056    else
2057    {
2058        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2059        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2060    }
2061}  // end process_display()
2062
2063
2064////////////////////////////////////////////////////////////////////////////////////////
2065//     Terminals related functions
2066////////////////////////////////////////////////////////////////////////////////////////
2067
2068//////////////////////////////////
2069uint32_t process_txt_alloc( void )
2070{
2071    uint32_t  index;       // TXT terminal index
2072    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2073    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2074    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2075    xptr_t    root_xp;     // extended pointer on owner field in chdev
2076
2077    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2078    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2079    {
2080        // get pointers on TXT_RX[index]
2081        chdev_xp  = chdev_dir.txt_rx[index];
2082        chdev_cxy = GET_CXY( chdev_xp );
2083        chdev_ptr = GET_PTR( chdev_xp );
2084
2085        // get extended pointer on root of attached process
2086        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2087
2088        // return free TXT index if found
2089        if( xlist_is_empty( root_xp ) ) return index; 
2090    }
2091
2092    assert( false , "no free TXT terminal found" );
2093
2094    return -1;
2095
2096} // end process_txt_alloc()
2097
2098/////////////////////////////////////////////
2099void process_txt_attach( process_t * process,
2100                         uint32_t    txt_id )
2101{
2102    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2103    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2104    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2105    xptr_t      root_xp;      // extended pointer on list root in chdev
2106    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2107
2108// check process is in owner cluster
2109assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2110"process descriptor not in owner cluster" );
2111
2112// check terminal index
2113assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2114"illegal TXT terminal index" );
2115
2116    // get pointers on TXT_RX[txt_id] chdev
2117    chdev_xp  = chdev_dir.txt_rx[txt_id];
2118    chdev_cxy = GET_CXY( chdev_xp );
2119    chdev_ptr = GET_PTR( chdev_xp );
2120
2121    // get extended pointer on root & lock of attached process list
2122    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2123    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2124
2125    // get lock protecting list of processes attached to TXT
2126    remote_busylock_acquire( lock_xp );
2127
2128    // insert process in attached process list
2129    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2130
2131    // release lock protecting list of processes attached to TXT
2132    remote_busylock_release( lock_xp );
2133
2134#if DEBUG_PROCESS_TXT
2135thread_t * this = CURRENT_THREAD;
2136uint32_t cycle = (uint32_t)hal_get_cycles();
2137if( DEBUG_PROCESS_TXT < cycle )
2138printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2139__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
2140#endif
2141
2142} // end process_txt_attach()
2143
2144/////////////////////////////////////////////
2145void process_txt_detach( xptr_t  process_xp )
2146{
2147    process_t * process_ptr;  // local pointer on process in owner cluster
2148    cxy_t       process_cxy;  // process owner cluster
2149    pid_t       process_pid;  // process identifier
2150    xptr_t      file_xp;      // extended pointer on stdin file
2151    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2152    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2153    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2154    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2155
2156    // get process cluster, local pointer, and PID
2157    process_cxy = GET_CXY( process_xp );
2158    process_ptr = GET_PTR( process_xp );
2159    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2160
2161// check process descriptor in owner cluster
2162assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2163"process descriptor not in owner cluster" );
2164
2165    // release TXT ownership (does nothing if not TXT owner)
2166    process_txt_transfer_ownership( process_xp );
2167
2168    // get extended pointer on process stdin pseudo file
2169    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2170
2171    // get pointers on TXT_RX chdev
2172    chdev_xp  = chdev_from_file( file_xp );
2173    chdev_cxy = GET_CXY( chdev_xp );
2174    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2175
2176    // get extended pointer on lock protecting attached process list
2177    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2178
2179    // get lock protecting list of processes attached to TXT
2180    remote_busylock_acquire( lock_xp );
2181
2182    // unlink process from attached process list
2183    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2184
2185    // release lock protecting list of processes attached to TXT
2186    remote_busylock_release( lock_xp );
2187
2188#if DEBUG_PROCESS_TXT
2189thread_t * this = CURRENT_THREAD;
2190uint32_t cycle  = (uint32_t)hal_get_cycles();
2191uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2192if( DEBUG_PROCESS_TXT < cycle )
2193printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
2194__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
2195#endif
2196
2197} // end process_txt_detach()
2198
2199///////////////////////////////////////////////////
2200void process_txt_set_ownership( xptr_t process_xp )
2201{
2202    process_t * process_ptr;
2203    cxy_t       process_cxy;
2204    pid_t       process_pid;
2205    xptr_t      file_xp;
2206    xptr_t      txt_xp;     
2207    chdev_t   * txt_ptr;
2208    cxy_t       txt_cxy;
2209
2210    // get pointers on process in owner cluster
2211    process_cxy = GET_CXY( process_xp );
2212    process_ptr = GET_PTR( process_xp );
2213    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2214
2215    // check owner cluster
2216    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2217    "process descriptor not in owner cluster" );
2218
2219    // get extended pointer on stdin pseudo file
2220    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2221
2222    // get pointers on TXT chdev
2223    txt_xp  = chdev_from_file( file_xp );
2224    txt_cxy = GET_CXY( txt_xp );
2225    txt_ptr = GET_PTR( txt_xp );
2226
2227    // set owner field in TXT chdev
2228    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
2229
2230#if DEBUG_PROCESS_TXT
2231thread_t * this = CURRENT_THREAD;
2232uint32_t cycle  = (uint32_t)hal_get_cycles();
2233uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2234if( DEBUG_PROCESS_TXT < cycle )
2235printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
2236__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
2237#endif
2238
2239}  // end process_txt_set ownership()
2240
2241////////////////////////////////////////////////////////
2242void process_txt_transfer_ownership( xptr_t process_xp )
2243{
2244    process_t * process_ptr;     // local pointer on process releasing ownership
2245    cxy_t       process_cxy;     // process cluster
2246    pid_t       process_pid;     // process identifier
2247    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2248    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
2249    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2250    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2251    uint32_t    txt_id;          // TXT_RX channel
2252    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2253    xptr_t      root_xp;         // extended pointer on root of attached process list
2254    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
2255    xptr_t      iter_xp;         // iterator for xlist
2256    xptr_t      current_xp;      // extended pointer on current process
2257    bool_t      found;
2258
2259#if DEBUG_PROCESS_TXT
2260thread_t * this  = CURRENT_THREAD;
2261uint32_t   cycle;
2262#endif
2263
2264    // get pointers on target process
2265    process_cxy = GET_CXY( process_xp );
2266    process_ptr = GET_PTR( process_xp );
2267    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2268
2269// check owner cluster
2270assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2271"process descriptor not in owner cluster" );
2272
2273    // get extended pointer on stdin pseudo file
2274    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2275
2276    // get pointers on TXT chdev
2277    txt_xp  = chdev_from_file( file_xp );
2278    txt_cxy = GET_CXY( txt_xp );
2279    txt_ptr = GET_PTR( txt_xp );
2280
2281    // get relevant infos from chdev descriptor
2282    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2283    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2284
2285    // transfer ownership only if target process is the TXT owner
2286    if( (owner_xp == process_xp) && (txt_id > 0) ) 
2287    {
2288        // get extended pointers on root and lock of attached processes list
2289        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2290        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
2291
2292        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2293        {
2294            // get lock
2295            remote_busylock_acquire( lock_xp );
2296
2297            // scan attached process list to find KSH process
2298            found = false;
2299            for( iter_xp = hal_remote_l64( root_xp ) ;
2300                 (iter_xp != root_xp) && (found == false) ;
2301                 iter_xp = hal_remote_l64( iter_xp ) )
2302            {
2303                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2304
2305                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2306                {
2307                    // set owner field in TXT chdev
2308                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2309
2310#if DEBUG_PROCESS_TXT
2311cycle = (uint32_t)hal_get_cycles();
2312if( DEBUG_PROCESS_TXT < cycle )
2313printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
2314__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2315#endif
2316                    found = true;
2317                }
2318            }
2319
2320            // release lock
2321            remote_busylock_release( lock_xp );
2322
2323// It must exist a KSH process for each user TXT channel
2324assert( (found == true), "KSH process not found for TXT%d", txt_id );
2325
2326        }
2327        else                                           // target process is KSH
2328        {
2329            // get lock
2330            remote_busylock_acquire( lock_xp );
2331
2332            // scan attached process list to find another process
2333            found = false;
2334            for( iter_xp = hal_remote_l64( root_xp ) ;
2335                 (iter_xp != root_xp) && (found == false) ;
2336                 iter_xp = hal_remote_l64( iter_xp ) )
2337            {
2338                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2339
2340                if( current_xp != process_xp )            // current is not KSH
2341                {
2342                    // set owner field in TXT chdev
2343                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2344
2345#if DEBUG_PROCESS_TXT
2346cycle  = (uint32_t)hal_get_cycles();
2347cxy_t       current_cxy = GET_CXY( current_xp );
2348process_t * current_ptr = GET_PTR( current_xp );
2349uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
2350if( DEBUG_PROCESS_TXT < cycle )
2351printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
2352__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
2353#endif
2354                    found = true;
2355                }
2356            }
2357
2358            // release lock
2359            remote_busylock_release( lock_xp );
2360
2361            // no more owner for TXT if no other process found
2362            if( found == false )
2363            {
2364                // set owner field in TXT chdev
2365                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
2366
2367#if DEBUG_PROCESS_TXT
2368cycle = (uint32_t)hal_get_cycles();
2369if( DEBUG_PROCESS_TXT < cycle )
2370printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
2371__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2372#endif
2373            }
2374        }
2375    }
2376    else
2377    {
2378
2379#if DEBUG_PROCESS_TXT
2380cycle = (uint32_t)hal_get_cycles();
2381if( DEBUG_PROCESS_TXT < cycle )
2382printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2383__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
2384#endif
2385
2386    }
2387
2388}  // end process_txt_transfer_ownership()
2389
2390
2391////////////////////////////////////////////////
2392bool_t process_txt_is_owner( xptr_t process_xp )
2393{
2394    // get local pointer and cluster of process in owner cluster
2395    cxy_t       process_cxy = GET_CXY( process_xp );
2396    process_t * process_ptr = GET_PTR( process_xp );
2397
2398// check calling thread execute in target process owner cluster
2399pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2400assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2401"process descriptor not in owner cluster" );
2402
2403    // get extended pointer on stdin pseudo file
2404    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2405
2406    // get pointers on TXT chdev
2407    xptr_t    txt_xp  = chdev_from_file( file_xp );
2408    cxy_t     txt_cxy = GET_CXY( txt_xp );
2409    chdev_t * txt_ptr = GET_PTR( txt_xp );
2410
2411    // get extended pointer on TXT_RX owner process
2412    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2413
2414    return (process_xp == owner_xp);
2415
2416}   // end process_txt_is_owner()
2417
2418////////////////////////////////////////////////     
2419xptr_t process_txt_get_owner( uint32_t channel )
2420{
2421    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2422    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2423    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2424
2425    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
2426
2427}  // end process_txt_get_owner()
2428
2429///////////////////////////////////////////
2430void process_txt_display( uint32_t txt_id )
2431{
2432    xptr_t      chdev_xp;
2433    cxy_t       chdev_cxy;
2434    chdev_t   * chdev_ptr;
2435    xptr_t      root_xp;
2436    xptr_t      lock_xp;
2437    xptr_t      current_xp;
2438    xptr_t      iter_xp;
2439    cxy_t       txt0_cxy;
2440    chdev_t   * txt0_ptr;
2441    xptr_t      txt0_xp;
2442    xptr_t      txt0_lock_xp;
2443   
2444    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2445    "illegal TXT terminal index" );
2446
2447    // get pointers on TXT0 chdev
2448    txt0_xp  = chdev_dir.txt_tx[0];
2449    txt0_cxy = GET_CXY( txt0_xp );
2450    txt0_ptr = GET_PTR( txt0_xp );
2451
2452    // get extended pointer on TXT0 lock
2453    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2454
2455    // get pointers on TXT_RX[txt_id] chdev
2456    chdev_xp  = chdev_dir.txt_rx[txt_id];
2457    chdev_cxy = GET_CXY( chdev_xp );
2458    chdev_ptr = GET_PTR( chdev_xp );
2459
2460    // get extended pointer on root & lock of attached process list
2461    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2462    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2463
2464    // get lock on attached process list
2465    remote_busylock_acquire( lock_xp );
2466
2467    // get TXT0 lock in busy waiting mode
2468    remote_busylock_acquire( txt0_lock_xp );
2469
2470    // display header
2471    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2472    txt_id , (uint32_t)hal_get_cycles() );
2473
2474    // scan attached process list
2475    XLIST_FOREACH( root_xp , iter_xp )
2476    {
2477        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2478        process_display( current_xp );
2479    }
2480
2481    // release TXT0 lock in busy waiting mode
2482    remote_busylock_release( txt0_lock_xp );
2483
2484    // release lock on attached process list
2485    remote_busylock_release( lock_xp );
2486
2487}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.