source: trunk/kernel/kern/process.c @ 675

Last change on this file since 675 was 669, checked in by alain, 4 years ago

1) Introduce up to 4 command lines arguments in the KSH "load" command.
These arguments are transferred to the user process through the
argc/argv mechanism, using the user space "args" vseg.

2) Introduce the named and anonymous "pipes", for inter-process communication
through the pipe() and mkfifo() syscalls.

3) Introduce the "chat" application to validate the two above mechanisms.

4) Improve the printk() and assert() functions in printk.c.

File size: 107.0 KB
Line 
1/*
2 * process.c - process related functions definition.
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner (2016,2017,2018,2019,2020)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <hal_vmm.h>
32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
42#include <chdev.h>
43#include <ksocket.h>
44#include <list.h>
45#include <string.h>
46#include <scheduler.h>
47#include <busylock.h>
48#include <queuelock.h>
49#include <remote_queuelock.h>
50#include <rwlock.h>
51#include <remote_rwlock.h>
52#include <dqdt.h>
53#include <cluster.h>
54#include <ppm.h>
55#include <boot_info.h>
56#include <process.h>
57#include <elf.h>
58#include <syscalls.h>
59#include <shared_syscalls.h>
60
61//////////////////////////////////////////////////////////////////////////////////////////
62// Extern global variables
63//////////////////////////////////////////////////////////////////////////////////////////
64
65extern process_t           process_zero;     // allocated in kernel_init.c
66extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
67
68//////////////////////////////////////////////////////////////////////////////////////////
69// Process initialisation related functions
70//////////////////////////////////////////////////////////////////////////////////////////
71
72/////////////////////////////////
73process_t * process_alloc( void )
74{
75
76assert( __FUNCTION__, (sizeof(process_t) < CONFIG_PPM_PAGE_SIZE),
77"process descriptor exceeds 1 page" );
78
79        kmem_req_t req;
80
81    req.type  = KMEM_PPM;
82        req.order = 0;
83        req.flags = AF_KERNEL | AF_ZERO;
84    return kmem_alloc( &req );
85}
86
87////////////////////////////////////////
88void process_free( process_t * process )
89{
90    kmem_req_t  req;
91
92        req.type = KMEM_PPM;
93        req.ptr  = process;
94        kmem_free( &req );
95}
96
97////////////////////////////////////////////////////
98error_t process_reference_init( process_t * process,
99                                pid_t       pid,
100                                xptr_t      parent_xp )
101{
102    error_t     error;
103    xptr_t      process_xp;
104    cxy_t       parent_cxy;
105    process_t * parent_ptr;
106    xptr_t      stdin_xp;
107    xptr_t      stdout_xp;
108    xptr_t      stderr_xp;
109    uint32_t    stdin_id;
110    uint32_t    stdout_id;
111    uint32_t    stderr_id;
112    uint32_t    txt_id;
113    char        rx_path[40];
114    char        tx_path[40];
115    pid_t       parent_pid;
116    vmm_t     * vmm;
117
118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
121    // get pointer on process vmm
122    vmm = &process->vmm;
123
124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
126    parent_ptr = GET_PTR( parent_xp );
127
128    // get parent_pid
129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
130
131#if DEBUG_PROCESS_REFERENCE_INIT
132thread_t * this = CURRENT_THREAD;
133uint32_t cycle = (uint32_t)hal_get_cycles();
134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
137#endif
138
139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
142    process->owner_xp   = XPTR( local_cxy , process );
143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
145
146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
153
154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
168    // initialize VSL lock
169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
171    // register kernel vsegs in user process VMM as required by the architecture
172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
194 
195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
196cycle = (uint32_t)hal_get_cycles();
197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
200#endif
201
202    // initialize fd_array as empty
203    process_fd_init( process );
204
205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
207    {
208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
211
212        // attach process to TXT
213        process_txt_attach( process_xp , txt_id ); 
214
215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
220#endif
221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintk( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintk( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
226        error = vfs_open(  process->vfs_root_xp,
227                           rx_path,
228                           process_xp,
229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
238
239assert( __FUNCTION__, (stdin_id == 0) , "stdin index must be 0" );
240
241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
246#endif
247
248        // create stdout pseudo file         
249        error = vfs_open(  process->vfs_root_xp,
250                           tx_path,
251                           process_xp,
252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
261
262assert( __FUNCTION__, (stdout_id == 1) , "stdout index must be 1" );
263
264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
269#endif
270
271        // create stderr pseudo file         
272        error = vfs_open(  process->vfs_root_xp,
273                           tx_path,
274                           process_xp,
275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
284
285assert( __FUNCTION__, (stderr_id == 2) , "stderr index must be 2" );
286
287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
292#endif
293
294    }
295    else                                            // normal user process
296    {
297        // get parent process TXT index
298        txt_id = process_txt_get_index( parent_xp );
299
300        // attach child process to same TXT terminal as parent
301        process_txt_attach( process_xp , txt_id ); 
302
303        // recreate all open files from parent process fd_array to child process fd_array
304        process_fd_replicate( process_xp , parent_xp );
305    }
306
307    // initialize lock protecting CWD changes
308    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
309
310#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
311cycle = (uint32_t)hal_get_cycles();
312if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
313printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
314__FUNCTION__, parent_pid, this->trdid, pid , cycle );
315#endif
316
317    // reset children list root
318    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
319    process->children_nr     = 0;
320    remote_queuelock_init( XPTR( local_cxy,
321                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
322
323    // reset semaphore / mutex / barrier / condvar list roots and lock
324    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
325    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
326    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
327    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
328    remote_queuelock_init( XPTR( local_cxy , 
329                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
330
331    // reset open directories root and lock
332    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
333    remote_queuelock_init( XPTR( local_cxy , 
334                                 &process->dir_lock ), LOCK_PROCESS_DIR );
335
336    // register new process in the local cluster manager pref_tbl[]
337    lpid_t lpid = LPID_FROM_PID( pid );
338    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
339
340    // register new process descriptor in local cluster manager local_list
341    cluster_process_local_link( process );
342
343    // register new process descriptor in local cluster manager copies_list
344    cluster_process_copies_link( process );
345
346    // initialize th_tbl[] array and associated threads
347    uint32_t i;
348
349    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
350        {
351        process->th_tbl[i] = NULL;
352    }
353    process->th_nr  = 0;
354    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
355
356        hal_fence();
357
358#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
359cycle = (uint32_t)hal_get_cycles();
360if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
361printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
362__FUNCTION__, parent_pid, this->trdid, pid, cycle );
363#endif
364
365#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
366hal_vmm_display( parent_xp , false );
367hal_vmm_display( XPTR( local_cxy , process ) , false );
368#endif
369
370    return 0;
371
372}  // process_reference_init()
373
374/////////////////////////////////////////////////////
375error_t process_copy_init( process_t * local_process,
376                           xptr_t      reference_process_xp )
377{
378    error_t   error;
379    vmm_t   * vmm;
380
381    // get reference process cluster and local pointer
382    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
383    process_t * ref_ptr = GET_PTR( reference_process_xp );
384
385    // get pointer on process vmm
386    vmm = &local_process->vmm;
387
388    // initialize PID, REF_XP, PARENT_XP, and STATE
389    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
390    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
391    local_process->ref_xp     = reference_process_xp;
392    local_process->owner_xp   = reference_process_xp;
393    local_process->term_state = 0;
394
395#if DEBUG_PROCESS_COPY_INIT
396thread_t * this = CURRENT_THREAD; 
397uint32_t cycle = (uint32_t)hal_get_cycles();
398if( DEBUG_PROCESS_COPY_INIT < cycle )
399printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
400__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
401#endif
402
403// check user process
404assert( __FUNCTION__, (local_process->pid != 0), "LPID cannot be 0" );
405
406    // initialize VSL as empty
407    vmm->vsegs_nr = 0;
408        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
409
410    // create an empty GPT as required by the architecture
411    error = hal_gpt_create( &vmm->gpt );
412    if( error ) 
413    {
414        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
415        return -1;
416    }
417
418    // initialize GPT and VSL locks
419        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
420
421    // register kernel vsegs in VMM as required by the architecture
422    error = hal_vmm_kernel_update( local_process );
423    if( error ) 
424    {
425        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
426        return -1;
427    }
428
429    // create "args" and "envs" vsegs
430    // create "stacks" and "mmap" vsegs allocators
431    // initialize locks protecting GPT and VSL
432    error = vmm_user_init( local_process );
433    if( error ) 
434    {
435        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
436        return -1;
437    }
438 
439#if (DEBUG_PROCESS_COPY_INIT & 1)
440cycle = (uint32_t)hal_get_cycles();
441if( DEBUG_PROCESS_COPY_INIT < cycle )
442printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
443__FUNCTION__, parent_pid, this->trdid, pid, cycle );
444#endif
445
446    // set process file descriptors array
447        process_fd_init( local_process );
448
449    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
450    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
451    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
452    local_process->cwd_xp      = XPTR_NULL;
453
454    // reset children list root (not used in a process descriptor copy)
455    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
456    local_process->children_nr   = 0;
457    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
458                           LOCK_PROCESS_CHILDREN );
459
460    // reset children_list (not used in a process descriptor copy)
461    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
462
463    // reset semaphores list root (not used in a process descriptor copy)
464    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
465    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
466    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
467    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
468
469    // initialize th_tbl[] array and associated fields
470    uint32_t i;
471    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
472        {
473        local_process->th_tbl[i] = NULL;
474    }
475    local_process->th_nr  = 0;
476    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
477
478    // register new process descriptor in local cluster manager local_list
479    cluster_process_local_link( local_process );
480
481    // register new process descriptor in owner cluster manager copies_list
482    cluster_process_copies_link( local_process );
483
484        hal_fence();
485
486#if DEBUG_PROCESS_COPY_INIT
487cycle = (uint32_t)hal_get_cycles();
488if( DEBUG_PROCESS_COPY_INIT < cycle )
489printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
490__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
491#endif
492
493    return 0;
494
495} // end process_copy_init()
496
///////////////////////////////////////////
// Release all resources attached to the local <process> descriptor copy:
// destroys the VMM, unlinks the copy from the cluster manager lists, and
// - only when executed in the owner cluster - detaches the process from
// its TXT terminal, removes it from the parent children list, and releases
// the PID. The descriptor page itself is released last.
// Precondition (checked by assert): the process contains no more threads.
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;
    xptr_t      children_nr_xp;

    pid_t       pid = process->pid;

// check no more threads
assert( __FUNCTION__, (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        // the parent children_nr counter is decremented under the same lock
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
610
611///////////////////////////////////////////////////////////////////
612const char * process_action_str( process_sigactions_t action_type )
613{
614    switch ( action_type )
615    {
616        case BLOCK_ALL_THREADS:   return "BLOCK";
617        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
618        case DELETE_ALL_THREADS:  return "DELETE";
619        default:                  return "undefined";
620    }
621}
622
////////////////////////////////////////
// Apply the sigaction <type> (block / unblock / delete all threads) to every
// copy of the process identified by <pid>, in all clusters. The client thread
// sends one parallel non-blocking RPC per remote copy, waits for all
// responses, and finally handles the local copy (if any) directly.
// Must be called by a thread that can yield (checked by assert).
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( __FUNCTION__, ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs : the block + RPC-send sequence must not be interrupted
    hal_disable_irq( &save_sr);

    // client thread blocks itself before sending the RPCs, so that the
    // last server thread can safely unblock it
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            // the local copy is handled after all RPCs have been sent
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            // (decremented by each server thread when it completes)
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
776
/////////////////////////////////////////////////
// Set the THREAD_BLOCKED_GLOBAL bit in all threads of the local <process>
// copy. When a target thread runs on another core, the calling thread
// requests an acknowledge from the target scheduler (and sends an IPI to
// force scheduling) to guarantee the target thread is actually descheduled
// before this function returns. Busy-waits until all acks are received.
// The target process must be a user process (checked by assert).
void process_block_threads( process_t * process )
{
    thread_t          * target;         // pointer on target thread
    thread_t          * this;           // pointer on calling thread
    uint32_t            ltid;           // index in process th_tbl[]
    uint32_t            count;          // requests counter
    volatile uint32_t   ack_count;      // acknowledges counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowlege the blocking
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from scheduler.
           
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exist
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
 
            if( this->core->lid != target->core->lid )
            {
                // increment responses counter
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait other threads acknowledges  TODO this could be improved...
    // ack_count is volatile : decremented by the target schedulers
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()
858
859/////////////////////////////////////////////////
860void process_delete_threads( process_t * process,
861                             xptr_t      client_xp )
862{
863    thread_t          * target;        // local pointer on target thread
864    xptr_t              target_xp;     // extended pointer on target thread
865    cxy_t               owner_cxy;     // owner process cluster
866    uint32_t            ltid;          // index in process th_tbl
867    uint32_t            count;         // threads counter
868
869    // get calling thread pointer
870
871    // get target process owner cluster
872    owner_cxy = CXY_FROM_PID( process->pid );
873
874#if DEBUG_PROCESS_SIGACTION
875thread_t * this  = CURRENT_THREAD;
876uint32_t   cycle = (uint32_t)hal_get_cycles();
877if( DEBUG_PROCESS_SIGACTION < cycle )
878printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
879__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
880#endif
881
882// check target process is an user process
883assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0),
884"process %x is not an user process\n", process->pid );
885
886    // get lock protecting process th_tbl[]
887    rwlock_wr_acquire( &process->th_lock );
888
889    // loop on target process local threads                       
890    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
891    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
892    {
893        target = process->th_tbl[ltid];
894
895        if( target != NULL )    // valid thread 
896        {
897            count++;
898            target_xp = XPTR( local_cxy , target );
899
900            // main thread and client thread should not be deleted
901            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
902                (client_xp) != target_xp )                           // not client thread
903            {
904                // mark target thread for delete and block it
905                thread_delete_request( target_xp , true );                   // forced
906            }
907        }
908    }
909
910    // release lock protecting process th_tbl[]
911    rwlock_wr_release( &process->th_lock );
912
913#if DEBUG_PROCESS_SIGACTION
914cycle = (uint32_t)hal_get_cycles();
915if( DEBUG_PROCESS_SIGACTION < cycle )
916printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
917__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
918#endif
919
920}  // end process_delete_threads()
921
922///////////////////////////////////////////////////
923void process_unblock_threads( process_t * process )
924{
925    thread_t          * target;        // pointer on target thead
926    uint32_t            ltid;          // index in process th_tbl
927    uint32_t            count;         // requests counter
928
929#if DEBUG_PROCESS_SIGACTION
930thread_t * this  = CURRENT_THREAD;
931pid_t      pid   = process->pid;
932uint32_t   cycle = (uint32_t)hal_get_cycles();
933if( DEBUG_PROCESS_SIGACTION < cycle )
934printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
935__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
936#endif
937
938// check target process is an user process
939assert( __FUNCTION__, ( LPID_FROM_PID( process->pid ) != 0 ),
940"process %x is not an user process\n", process->pid );
941
942    // get lock protecting process th_tbl[]
943    rwlock_rd_acquire( &process->th_lock );
944
945    // loop on process threads to unblock all threads
946    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
947    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
948    {
949        target = process->th_tbl[ltid];
950
951        if( target != NULL )             // thread found
952        {
953            count++;
954
955            // reset the global blocked bit in target thread descriptor.
956            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
957        }
958    }
959
960    // release lock protecting process th_tbl[]
961    rwlock_rd_release( &process->th_lock );
962
963#if DEBUG_PROCESS_SIGACTION
964cycle = (uint32_t)hal_get_cycles();
965if( DEBUG_PROCESS_SIGACTION < cycle )
966printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
967__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
968#endif
969
970}  // end process_unblock_threads()
971
972///////////////////////////////////////////////
973process_t * process_get_local_copy( pid_t pid )
974{
975    error_t        error;
976    process_t    * process_ptr;   // local pointer on process
977    xptr_t         process_xp;    // extended pointer on process
978
979    cluster_t * cluster = LOCAL_CLUSTER;
980
981#if DEBUG_PROCESS_GET_LOCAL_COPY
982thread_t * this = CURRENT_THREAD;
983uint32_t cycle = (uint32_t)hal_get_cycles();
984if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
985printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
986__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
987#endif
988
989    // get lock protecting local list of processes
990    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
991
992    // scan the local list of process descriptors to find the process
993    xptr_t  iter;
994    bool_t  found = false;
995    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
996    {
997        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
998        process_ptr = GET_PTR( process_xp );
999        if( process_ptr->pid == pid )
1000        {
1001            found = true;
1002            break;
1003        }
1004    }
1005
1006    // release lock protecting local list of processes
1007    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1008
1009    // allocate memory for a new local process descriptor
1010    // and initialise it from reference cluster if not found
1011    if( !found )
1012    {
1013        // get extended pointer on reference process descriptor
1014        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
1015
1016        assert( __FUNCTION__, (ref_xp != XPTR_NULL) , "illegal pid\n" );
1017
1018        // allocate memory for local process descriptor
1019        process_ptr = process_alloc();
1020
1021        if( process_ptr == NULL )  return NULL;
1022
1023        // initialize local process descriptor copy
1024        error = process_copy_init( process_ptr , ref_xp );
1025
1026        if( error ) return NULL;
1027    }
1028
1029#if DEBUG_PROCESS_GET_LOCAL_COPY
1030cycle = (uint32_t)hal_get_cycles();
1031if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1032printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
1033__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
1034#endif
1035
1036    return process_ptr;
1037
1038}  // end process_get_local_copy()
1039
1040////////////////////////////////////////////
1041pid_t process_get_ppid( xptr_t  process_xp )
1042{
1043    cxy_t       process_cxy;
1044    process_t * process_ptr;
1045    xptr_t      parent_xp;
1046    cxy_t       parent_cxy;
1047    process_t * parent_ptr;
1048
1049    // get process cluster and local pointer
1050    process_cxy = GET_CXY( process_xp );
1051    process_ptr = GET_PTR( process_xp );
1052
1053    // get pointers on parent process
1054    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1055    parent_cxy = GET_CXY( parent_xp );
1056    parent_ptr = GET_PTR( parent_xp );
1057
1058    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1059}
1060
1061//////////////////////////////////////////////////////////////////////////////////////////
1062// File descriptor array related functions
1063//////////////////////////////////////////////////////////////////////////////////////////
1064
1065///////////////////////////////////////////
1066char * process_fd_type_str( uint32_t type )
1067{
1068    switch( type )
1069    {
1070        case FILE_TYPE_REG : return "FILE";
1071        case FILE_TYPE_DIR  : return "DIR";
1072        case FILE_TYPE_FIFO : return "FIFO";
1073        case FILE_TYPE_PIPE : return "PIPE";
1074        case FILE_TYPE_SOCK : return "SOCK";
1075        case FILE_TYPE_DEV  : return "DEV";
1076        case FILE_TYPE_BLK  : return "BLK";
1077        case FILE_TYPE_SYML : return "SYML";
1078       
1079        default              : return "undefined";
1080    }
1081}
1082   
1083///////////////////////////////////////////
1084void process_fd_init( process_t * process )
1085{
1086    uint32_t fd;
1087
1088    // initialize lock
1089    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
1090
1091    // initialize number of open files
1092    process->fd_array.max = 0;
1093
1094    // initialize array
1095    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1096    {
1097        process->fd_array.array[fd] = XPTR_NULL;
1098    }
1099}
1100
////////////////////////////////////////////////////
// Register the <file_xp> file descriptor in the first free slot of the
// fd_array[] embedded in the owner process identified by <process_xp>,
// and return the allocated slot index in <fdid>.
// The fd_array lock is held during the scan and the update.
// Returns 0 on success / returns -1 when no free slot was found.
error_t process_fd_register( xptr_t      process_xp,
                             xptr_t      file_xp,
                             uint32_t  * fdid )
{
    bool_t    found;
    uint32_t  id;
    uint32_t  max;             // current value of max non-free slot index
    xptr_t    entry_xp;        // current value of one fd_array entry
    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
    xptr_t    max_xp;          // extended pointer on max field in fd_array

    // get target process cluster and local pointer
    process_t * process_ptr = GET_PTR( process_xp );
    cxy_t       process_cxy = GET_CXY( process_xp );

// check target process is owner process
assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
"process must be owner process\n" );

#if DEBUG_PROCESS_FD_REGISTER
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
if( DEBUG_PROCESS_FD_REGISTER < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
#endif

    // build extended pointers on lock & max
    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );

    // take lock protecting fd_array
	remote_queuelock_acquire( lock_xp );

    found   = false;

    // get current value of max_fdid
    max = hal_remote_l32( max_xp );

    // linear scan for the first empty slot
    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
    {
        // get fd_array entry
        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );

        // take the first empty slot
        if ( entry_xp == XPTR_NULL )
        {
            // update  fd_array
            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );

            // update max when required
            if( id > max ) hal_remote_s32( max_xp , id );

            // exit loop
			*fdid = id;
            found = true;
            break;
        }
    }

    // release lock protecting fd_array
	remote_queuelock_release( lock_xp );

#if DEBUG_PROCESS_FD_REGISTER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_FD_REGISTER < cycle )
// NOTE(review): when no free slot was found, <id> printed below equals
// CONFIG_PROCESS_FILE_MAX_NR, which is not a valid fdid
printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
#endif

    if ( !found ) return -1;
    else          return 0;

}  // end process_fd_register()
1177
1178/////////////////////////////////////////////
1179void process_fd_remove( xptr_t    process_xp,
1180                        uint32_t  fdid )
1181{
1182    pid_t       pid;           // target process PID
1183    lpid_t      lpid;          // target process LPID
1184    xptr_t      file_xp;       // extended pointer on file descriptor
1185    xptr_t      iter_xp;       // iterator for list of process copies
1186    xptr_t      copy_xp;       // extended pointer on process copy
1187    process_t * copy_ptr;      // local pointer on process copy 
1188    cxy_t       copy_cxy;      // process copy cluster identifier
1189
1190    // get target process cluster and local pointer
1191    process_t * process_ptr = GET_PTR( process_xp );
1192    cxy_t       process_cxy = GET_CXY( process_xp );
1193
1194// check target process is owner process
1195assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1196"process must be owner process\n" );
1197
1198    // get target process pid and lpid
1199    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1200    lpid = LPID_FROM_PID( pid );
1201
1202#if DEBUG_PROCESS_FD_REMOVE
1203uint32_t    cycle = (uint32_t)hal_get_cycles();
1204thread_t  * this  = CURRENT_THREAD;
1205if( DEBUG_PROCESS_FD_REMOVE < cycle )
1206printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
1207__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1208#endif
1209
1210    // get extended pointer on file descriptor
1211    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1212
1213    // build extended pointers on list_of_copies root and lock (in owner cluster)
1214    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
1215    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
1216 
1217    // build extended pointer on fd_array lock and max
1218    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1219    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
1220
1221    // take lock protecting fd_array
1222        remote_queuelock_acquire( fd_lock_xp );
1223
1224    // take the lock protecting the list of copies
1225    remote_queuelock_acquire( copies_lock_xp );
1226
1227    // get max value
1228    uint32_t max = hal_remote_l32( fd_max_xp );
1229
1230    // loop on list of process copies
1231    XLIST_FOREACH( copies_root_xp , iter_xp )
1232    {
1233        // get pointers on process copy
1234        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
1235        copy_ptr = GET_PTR( copy_xp );
1236        copy_cxy = GET_CXY( copy_xp );
1237
1238        // release the fd_array entry in process copy
1239        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
1240    }
1241
1242    // update max when required
1243    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );
1244
1245    // release the lock protecting fd_array
1246        remote_queuelock_release( fd_lock_xp );
1247
1248    // release the lock protecting the list of copies
1249    remote_queuelock_release( copies_lock_xp );
1250
1251#if DEBUG_PROCESS_FD_REMOVE
1252cycle = (uint32_t)hal_get_cycles();
1253if( DEBUG_PROCESS_FD_REMOVE < cycle )
1254printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
1255__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1256#endif
1257
1258}  // end process_fd_remove()
1259
1260//////////////////////////////////////////////
1261void process_fd_clean_all( xptr_t process_xp )
1262{
1263    uint32_t  fdid;
1264    xptr_t    file_xp;         // one fd_array entry
1265    xptr_t    lock_xp;         // extendad pointer on lock protecting fd_array
1266    uint32_t  max;             // number of registered files
1267
1268    // get process cluster, local pointer and PID
1269    process_t * process_ptr = GET_PTR( process_xp );
1270    cxy_t       process_cxy = GET_CXY( process_xp );
1271
1272// check target process is owner process
1273assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
1274"process must be owner process\n" );
1275
1276#if DEBUG_PROCESS_FD_CLEAN_ALL
1277thread_t * this  = CURRENT_THREAD;
1278uint32_t   cycle = (uint32_t)hal_get_cycles();
1279if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1280printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1281__FUNCTION__, this->process->pid, this->trdid, cycle );
1282
1283process_fd_display( process_xp );
1284#endif
1285
1286    // build extended pointer on lock protecting the fd_array
1287    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1288
1289    // get max index for fd_array
1290    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1291
1292    // take lock protecting fd_array
1293        remote_queuelock_acquire( lock_xp );
1294
1295    for( fdid = 0 ; fdid <= max ; fdid++ )
1296    {
1297        // get fd_array entry
1298        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ) );
1299       
1300        if ( file_xp != XPTR_NULL )
1301        {
1302            vfs_file_t * file_ptr = GET_PTR( file_xp );
1303            cxy_t        file_cxy = GET_CXY( file_xp );
1304
1305            // get file type
1306            uint32_t file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ));
1307 
1308            if( file_type == FILE_TYPE_REG )
1309            {
1310                vfs_close( file_xp , fdid );
1311            }
1312            if( file_type == FILE_TYPE_SOCK )
1313            {
1314                socket_close( file_xp , fdid );
1315            }
1316        }
1317    }
1318
1319    // release lock protecting fd_array
1320        remote_queuelock_release( lock_xp );
1321
1322#if DEBUG_PROCESS_FD_CLEAN_ALL
1323cycle = (uint32_t)hal_get_cycles();
1324if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1325printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1326__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1327#endif
1328
1329}  // end process_fd_clean_all()
1330
1331//////////////////////////////////////////////////////////////
1332xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1333                                       uint32_t    fdid )
1334{
1335    cxy_t       process_cxy = GET_CXY( process_xp );
1336    process_t * process_ptr = GET_PTR( process_xp );
1337
1338assert( __FUNCTION__, (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
1339"process_xp argument must be the owner process" );
1340
1341    // access owner process fd_array
1342    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1343
1344}  // end process_fd_get_xptr_from_owner()
1345
///////////////////////////////////////////////////////////
// Return the extended pointer on the open file descriptor identified by
// <fdid>, from a local <process> descriptor copy.
// When the local fd_array entry is empty, the owner process fd_array is
// consulted (under the owner fd_array lock), and a non-null entry found
// there is lazily cached in the local fd_array.
// Returns XPTR_NULL when the fdid is not registered in the owner process.
xptr_t process_fd_get_xptr_from_local( process_t * process,
                                       uint32_t    fdid )
{
    xptr_t  file_xp;
    xptr_t  lock_xp;

    // access local copy of process descriptor
    file_xp = process->fd_array.array[fdid];

    if( file_xp == XPTR_NULL )
    {
        // get owner process cluster and local pointer
        xptr_t      owner_xp  = process->owner_xp;
        cxy_t       owner_cxy = GET_CXY( owner_xp );
        process_t * owner_ptr = GET_PTR( owner_xp );

        // build extended pointer on lock protecting fd_array
        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );

        // take lock protecting fd_array
	    remote_queuelock_acquire( lock_xp );

        // access owner process descriptor
        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );

        if( file_xp != XPTR_NULL )
        {
           // update local fd_array (lazy caching of the owner entry)
            process->fd_array.array[fdid] = file_xp;
        }

        // release lock protecting fd_array
	    remote_queuelock_release( lock_xp );
    }

    return file_xp;

}  // end process_fd_get_xptr_from_local()
1385
1386/////////////////////////////////////////
1387void process_fd_replicate( xptr_t dst_xp,
1388                           xptr_t src_xp )
1389{
1390    uint32_t fdid;      // current file descriptor index
1391    xptr_t   old_xp;    // extended pointer on a file descriptor (stored in SRC fd_array)
1392    xptr_t   new_xp;    // extended pointer on a file descriptor (stored in DST fd_array)
1393    error_t  error;
1394
1395    // get cluster and local pointer for SRC process
1396    cxy_t       src_cxy = GET_CXY( src_xp );
1397    process_t * src_ptr = GET_PTR( src_xp );
1398
1399assert( __FUNCTION__, (src_xp == hal_remote_l64( XPTR( src_cxy , &src_ptr->owner_xp ))),
1400"src_xp process not in owner cluster" );
1401
1402    // get cluster and local pointer for DST fd_array
1403    cxy_t       dst_cxy = GET_CXY( dst_xp );
1404    process_t * dst_ptr = GET_PTR( dst_xp );
1405
1406assert( __FUNCTION__, (dst_xp == hal_remote_l64( XPTR( dst_cxy , &dst_ptr->owner_xp ))),
1407"dst_xp process not in owner cluster" );
1408
1409    // build extende pointers on SRC fd_array lock and max fields
1410    xptr_t  src_lock_xp = XPTR( src_cxy , &src_ptr->fd_array.lock );
1411    xptr_t  src_max_xp  = XPTR( src_cxy , &src_ptr->fd_array.max );
1412
1413    // get the remote lock protecting the src fd_array
1414        remote_queuelock_acquire( src_lock_xp );
1415 
1416    // loop on fd_array entries
1417    for( fdid = 0 ; fdid <= hal_remote_l32( src_max_xp ) ; fdid++ )
1418        {
1419                old_xp = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->fd_array.array[fdid] ) );
1420
1421                if( old_xp != XPTR_NULL )
1422                {
1423            // get the existing file descriptor cluster and local pointer
1424            vfs_file_t * old_ptr = GET_PTR( old_xp );
1425            cxy_t        old_cxy = GET_CXY( old_xp );
1426
1427            // get existing file attributes and local pointer on inode
1428            uint32_t      attr      = hal_remote_l32( XPTR( old_cxy , &old_ptr->attr ) );
1429            vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( old_cxy , &old_ptr->inode ) );
1430
1431            // create a new file descriptor in same cluster as the existing one
1432            error = vfs_file_create( XPTR( old_cxy , inode_ptr ),
1433                                     attr,
1434                                     &new_xp );
1435            if( error )
1436            {
1437                printk("\n[ERROR] in %s : cannot create new file\n", __FUNCTION__ );
1438                return;
1439            }
1440
1441                        // register new_xp in DST fd_array
1442                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->fd_array.array[fdid] ) , new_xp );
1443                }
1444        }
1445
1446    // release lock on source process fd_array
1447        remote_queuelock_release( src_lock_xp );
1448
1449}  // end process_fd_replicate()
1450
1451
1452////////////////////////////////////
1453bool_t process_fd_array_full( void )
1454{
1455    // get extended pointer on owner process
1456    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
1457
1458    // get owner process cluster and local pointer
1459    process_t * owner_ptr = GET_PTR( owner_xp );
1460    cxy_t       owner_cxy = GET_CXY( owner_xp );
1461
1462    // get number of open file descriptors from  fd_array
1463    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1464
1465        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
1466}
1467
////////////////////////////////////////////
// Display on the kernel terminal the fd_array of the process identified
// by <process_xp> : one line per slot up to the <max> index registered in
// the owner process descriptor (file type and pointer, plus file name when
// the descriptor has an associated inode), or "empty slot".
void process_fd_display( xptr_t process_xp )
{
    uint32_t      fdid;
    xptr_t        file_xp;
    vfs_file_t *  file_ptr;
    cxy_t         file_cxy;
    uint32_t      file_type;
    xptr_t        inode_xp;
    vfs_inode_t * inode_ptr;

    char          name[CONFIG_VFS_MAX_NAME_LENGTH];

    // get process cluster and local pointer
    process_t * process_ptr = GET_PTR( process_xp );
    cxy_t       process_cxy = GET_CXY( process_xp );

    // get process PID
    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));

    // get pointers on owner process descriptor
    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
    process_t * owner_ptr = GET_PTR( owner_xp );
    cxy_t       owner_cxy = GET_CXY( owner_xp );

    // get max fdid from owner process descriptor
    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));

    printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
    pid, process_cxy, max );

    for( fdid = 0 ; fdid <= max ; fdid++ )
    {
        // get pointers on file descriptor
        // (GET_PTR / GET_CXY only decode the xptr value : no dereference
        //  is done before the XPTR_NULL test below)
        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
        file_ptr = GET_PTR( file_xp );
        file_cxy = GET_CXY( file_xp );

        if( file_xp != XPTR_NULL )
        {
            // get file type
            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ));

            // get file name if inode exist
            if( (file_type != FILE_TYPE_PIPE) && (file_type != FILE_TYPE_SOCK) )
            {
                // get inode pointers
                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
                inode_xp  = XPTR( file_cxy , inode_ptr );

                // get file name
                vfs_inode_get_name( inode_xp , name );

                // display relevant file descriptor info
                printk(" - %d : type %s / ptr %x (%s)\n",
                fdid, process_fd_type_str(file_type), file_ptr, name );
            }
            else    // PIPE or SOCK types
            {
                // display relevant file descriptor info (no inode / no name)
                printk(" - %d : type %s / ptr %x\n",
                fdid , process_fd_type_str(file_type), file_ptr );
            }
        }
        else
        {
            printk(" - %d : empty slot\n",
            fdid );
        }
    }
}   // end process_fd_display()
1539
1540////////////////////////////////////////////////////////////////////////////////////
1541//  Thread related functions
1542////////////////////////////////////////////////////////////////////////////////////
1543
1544/////////////////////////////////////////////////////
1545error_t process_register_thread( process_t * process,
1546                                 thread_t  * thread,
1547                                 trdid_t   * trdid )
1548{
1549    ltid_t         ltid;
1550    bool_t         found = false;
1551 
1552// check arguments
1553assert( __FUNCTION__, (process != NULL) , "process argument is NULL" );
1554assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
1555
1556    // get the lock protecting th_tbl for all threads
1557    // but the idle thread executing kernel_init (cannot yield)
1558    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1559
1560    // scan th_tbl
1561    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1562    {
1563        if( process->th_tbl[ltid] == NULL )
1564        {
1565            found = true;
1566            break;
1567        }
1568    }
1569
1570    if( found )
1571    {
1572        // register thread in th_tbl[]
1573        process->th_tbl[ltid] = thread;
1574        process->th_nr++;
1575
1576        // returns trdid
1577        *trdid = TRDID( local_cxy , ltid );
1578    }
1579
1580    // release the lock protecting th_tbl
1581    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1582
1583    return (found) ? 0 : 0xFFFFFFFF;
1584
1585}  // end process_register_thread()
1586
1587///////////////////////////////////////////////////
1588uint32_t process_remove_thread( thread_t * thread )
1589{
1590    uint32_t count;  // number of threads in local process descriptor
1591
1592// check thread
1593assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
1594
1595    process_t * process = thread->process;
1596
1597    // get thread local index
1598    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1599   
1600    // get the lock protecting th_tbl[]
1601    rwlock_wr_acquire( &process->th_lock );
1602
1603    // get number of threads
1604    count = process->th_nr;
1605
1606// check th_nr value
1607assert( __FUNCTION__, (count > 0) , "process th_nr cannot be 0" );
1608
1609    // remove thread from th_tbl[]
1610    process->th_tbl[ltid] = NULL;
1611    process->th_nr = count-1;
1612
1613    // release lock protecting th_tbl
1614    rwlock_wr_release( &process->th_lock );
1615
1616    return count;
1617
1618}  // end process_remove_thread()
1619
1620/////////////////////////////////////////////////////////
1621error_t process_make_fork( xptr_t      parent_process_xp,
1622                           xptr_t      parent_thread_xp,
1623                           pid_t     * child_pid,
1624                           thread_t ** child_thread )
1625{
1626    process_t * process;         // local pointer on child process descriptor
1627    thread_t  * thread;          // local pointer on child thread descriptor
1628    pid_t       new_pid;         // process identifier for child process
1629    pid_t       parent_pid;      // process identifier for parent process
1630    xptr_t      ref_xp;          // extended pointer on reference process
1631    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
1632    error_t     error;
1633
1634    // get cluster and local pointer for parent process
1635    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
1636    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
1637
1638    // get parent process PID and extended pointer on .elf file
1639    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1640    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1641
1642    // get extended pointer on reference process
1643    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
1644
1645// check parent process is the reference process
1646assert( __FUNCTION__, (parent_process_xp == ref_xp ) ,
1647"parent process must be the reference process" );
1648
1649#if DEBUG_PROCESS_MAKE_FORK
1650uint32_t   cycle;
1651thread_t * this  = CURRENT_THREAD;
1652trdid_t    trdid = this->trdid;
1653pid_t      pid   = this->process->pid;
1654#endif
1655
1656#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1657cycle   = (uint32_t)hal_get_cycles();
1658if( DEBUG_PROCESS_MAKE_FORK < cycle )
1659printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
1660__FUNCTION__, pid, trdid, local_cxy, cycle );
1661#endif
1662
1663    // allocate a process descriptor
1664    process = process_alloc();
1665
1666    if( process == NULL )
1667    {
1668        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1669        __FUNCTION__, local_cxy ); 
1670        return -1;
1671    }
1672
1673    // allocate a child PID from local cluster
1674    error = cluster_pid_alloc( process , &new_pid );
1675    if( error ) 
1676    {
1677        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1678        __FUNCTION__, local_cxy ); 
1679        process_free( process );
1680        return -1;
1681    }
1682
1683#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1684cycle = (uint32_t)hal_get_cycles();
1685if( DEBUG_PROCESS_MAKE_FORK < cycle )
1686printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
1687__FUNCTION__, pid, trdid, new_pid, cycle );
1688#endif
1689
1690    // initializes child process descriptor from parent process descriptor
1691    error = process_reference_init( process,
1692                                    new_pid,
1693                                    parent_process_xp );
1694    if( error ) 
1695    {
1696        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1697        __FUNCTION__, local_cxy ); 
1698        process_free( process );
1699        return -1;
1700    }
1701
1702#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1703cycle = (uint32_t)hal_get_cycles();
1704if( DEBUG_PROCESS_MAKE_FORK < cycle )
1705printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
1706__FUNCTION__, pid, trdid, new_pid, cycle );
1707#endif
1708
1709    // copy VMM from parent descriptor to child descriptor
1710    error = vmm_fork_copy( process,
1711                           parent_process_xp );
1712    if( error )
1713    {
1714        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1715        __FUNCTION__, local_cxy ); 
1716        process_free( process );
1717        cluster_pid_release( new_pid );
1718        return -1;
1719    }
1720
1721#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1722cycle = (uint32_t)hal_get_cycles();
1723if( DEBUG_PROCESS_MAKE_FORK < cycle )
1724{
1725    printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
1726    __FUNCTION__, pid, trdid, cycle );
1727    hal_vmm_display( XPTR( local_cxy , process ) , true );
1728}
1729#endif
1730
1731    // if parent_process is INIT, or if parent_process is the TXT owner,
1732    // the child_process becomes the owner of its TXT terminal
1733    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
1734    {
1735        process_txt_set_ownership( XPTR( local_cxy , process ) );
1736
1737#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1738cycle = (uint32_t)hal_get_cycles();
1739if( DEBUG_PROCESS_MAKE_FORK < cycle )
1740printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
1741__FUNCTION__ , pid, trdid, new_pid, cycle );
1742#endif
1743
1744    }
1745
1746    // update extended pointer on .elf file
1747    process->vfs_bin_xp = vfs_bin_xp;
1748
1749    // create child thread descriptor from parent thread descriptor
1750    error = thread_user_fork( parent_thread_xp,
1751                              process,
1752                              &thread );
1753    if( error )
1754    {
1755        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1756        __FUNCTION__, local_cxy ); 
1757        process_free( process );
1758        cluster_pid_release( new_pid );
1759        return -1;
1760    }
1761
1762// check main thread LTID
1763assert( __FUNCTION__, (LTID_FROM_TRDID(thread->trdid) == 0) ,
1764"main thread must have LTID == 0" );
1765
1766#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1767cycle = (uint32_t)hal_get_cycles();
1768if( DEBUG_PROCESS_MAKE_FORK < cycle )
1769printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
1770__FUNCTION__, pid, trdid, thread, cycle );
1771#endif
1772
1773    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
1774    // this includes all parent process copies in all clusters
1775    if( parent_process_cxy == local_cxy )   // reference is local
1776    {
1777        vmm_set_cow( parent_process_ptr );
1778    }
1779    else                                    // reference is remote
1780    {
1781        rpc_vmm_set_cow_client( parent_process_cxy,
1782                                parent_process_ptr );
1783    }
1784
1785    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
1786    vmm_set_cow( process );
1787 
1788#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1789cycle = (uint32_t)hal_get_cycles();
1790if( DEBUG_PROCESS_MAKE_FORK < cycle )
1791printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
1792__FUNCTION__, pid, trdid, cycle );
1793#endif
1794
1795    // get extended pointers on parent children_root, children_lock and children_nr
1796    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1797    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1798    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
1799
1800    // register process in parent children list
1801    remote_queuelock_acquire( children_lock_xp );
1802        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1803        hal_remote_atomic_add( children_nr_xp , 1 );
1804    remote_queuelock_release( children_lock_xp );
1805
1806    // return success
1807    *child_thread = thread;
1808    *child_pid    = new_pid;
1809
1810#if DEBUG_PROCESS_MAKE_FORK
1811cycle = (uint32_t)hal_get_cycles();
1812if( DEBUG_PROCESS_MAKE_FORK < cycle )
1813printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
1814__FUNCTION__, pid, trdid, new_pid, cycle );
1815#endif
1816
1817    return 0;
1818
1819}   // end process_make_fork()
1820
1821////////////////////////////////////////////////i//////////////////////////////////////
1822// This static function is called by the thread_user_exec() function :
1823// - to register the main() arguments (args) in the <exec_info> structure.
1824// - to register the environment variables (envs) in the <exec_info> structure.
1825// In both cases the input is an array of NULL terminated string pointers in user
1826// space, and the strings can be dispatched anywhere in the user process space.
1827// This array of pointers is defined by the <u_pointers> argument. The empty slots
1828// contain the NULL value, and the N non-empty slots are indexed from 0 to (N-1).
// - The max number of args, and the max number of envs, are defined by the
//   CONFIG_PROCESS_ARGS_MAX_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
// - The numbers of pages to store the (args) and (envs) strings are defined by the
//   CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE parameters.
1833///////////////////////////////////////////////////////////////////////////////////////
1834// Implementation note:
1835// It allocates a kernel buffer to store a kernel copy of both the array of pointers,
1836// and the strings. It set the pointers and copies the strings in this kernel buffer.
1837// Finally, it registers the buffer & the actual number of strings in the process
1838// exec_info structure  (defined in the <process.h> file).
1839///////////////////////////////////////////////////////////////////////////////////////
1840// @ is_args     : [in]    true if called for (args) / false if called for (envs).
1841// @ u_pointers  : [in]    array of pointers on the strings (in user space).
1842// @ exec_info   : [out]   pointer on the exec_info structure.
1843// @ return 0 if success / non-zero if too many strings or no memory.
1844///////////////////////////////////////////////////////////////////////////////////////
1845error_t process_exec_get_strings( bool_t         is_args,
1846                                  char        ** u_pointers,
1847                                  exec_info_t  * exec_info )
1848{
1849    uint32_t     index;           // slot index in pointers array
1850    uint32_t     length;          // string length (in bytes)
1851    uint32_t     pointers_bytes;  // number of bytes to store pointers
1852    uint32_t     max_index;       // max size of pointers array
1853    char      ** k_pointers;      // base of kernel array of pointers
1854    char       * k_buf_ptr;       // pointer on first empty slot in strings buffer
1855    uint32_t     k_buf_space;     // number of bytes available in string buffer
1856    kmem_req_t   req;             // kernel memory allocator request
1857    char       * k_buf;           // kernel buffer for both pointers & strings
1858
1859#if DEBUG_PROCESS_EXEC_GET_STRINGS
1860thread_t * this  = CURRENT_THREAD;
1861uint32_t   cycle = (uint32_t)hal_get_cycles();
1862#endif
1863
1864    // Allocate one block of physical memory for both the pointers and the strings
1865    // as defined by the CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE parameters
1866    // - the array of pointers is stored in the first bytes of the kernel buffer
1867    // - the strings themselve are stored in the next bytes of this buffer
1868    // Set the k_pointers, k_buf_ptr, k_buf_space, and max_index
1869
1870    if( is_args )
1871    {
1872        req.type   = KMEM_PPM;
1873        req.order  = bits_log2( CONFIG_VMM_ARGS_SIZE );
1874        req.flags  = AF_KERNEL | AF_ZERO;
1875        k_buf      = kmem_alloc( &req );
1876
1877        pointers_bytes = CONFIG_PROCESS_ARGS_MAX_NR * sizeof(char *);
1878        k_pointers     = (char **)k_buf;
1879        k_buf_ptr      = k_buf + pointers_bytes;
1880        k_buf_space    = (CONFIG_VMM_ARGS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
1881        max_index      = CONFIG_PROCESS_ARGS_MAX_NR;
1882
1883#if DEBUG_PROCESS_EXEC_GET_STRINGS
1884if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1885printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n",
1886__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
1887#endif
1888
1889    }
1890    else
1891    {
1892        req.type   = KMEM_PPM;
1893        req.order  = bits_log2( CONFIG_VMM_ENVS_SIZE );
1894        req.flags  = AF_KERNEL | AF_ZERO;
1895        k_buf      = kmem_alloc( &req );
1896
1897        pointers_bytes = CONFIG_PROCESS_ENVS_MAX_NR * sizeof(char *);
1898        k_pointers     = (char **)k_buf;
1899        k_buf_ptr      = k_buf + pointers_bytes;
1900        k_buf_space    = (CONFIG_VMM_ENVS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
1901        max_index      = CONFIG_PROCESS_ENVS_MAX_NR;
1902
1903#if DEBUG_PROCESS_EXEC_GET_STRINGS
1904if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1905printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n",
1906__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
1907#endif
1908
1909    }
1910
1911    // copy the user array of pointers to kernel buffer
1912    hal_copy_from_uspace( XPTR( local_cxy , k_pointers ),
1913                          u_pointers,
1914                          pointers_bytes );
1915
1916    // WARNING : the pointers copied in the k_pointers[] array are user pointers,
1917    // after the loop below, the k_pointers[] array contains kernel pointers.
1918
1919#if DEBUG_PROCESS_EXEC_GET_STRINGS
1920if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1921printk("\n[%s] thread[%x,%x] copied u_ptr array to k_ptr array\n"
1922"    p0 = %x / p1 = %x / p2 = %x / p3 = %x\n",
1923__FUNCTION__, this->process->pid, this->trdid,
1924k_pointers[0], k_pointers[1], k_pointers[2], k_pointers[3] );
1925#endif
1926
1927    // scan kernel array of pointers to copy strings to kernel buffer
1928    for( index = 0 ; index < max_index ; index++ )
1929    {
1930        // exit loop if (k_pointers[] == NUll)
1931        if( k_pointers[index] == NULL ) break;
1932
1933        // compute string length
1934        length = hal_strlen_from_uspace( k_pointers[index] ) + 1;
1935
1936        // return error if overflow in kernel buffer
1937        if( length > k_buf_space ) return -1;
1938
1939        // copy the string to kernel buffer
1940        hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ),
1941                              k_pointers[index],
1942                              length );
1943
1944#if DEBUG_PROCESS_EXEC_GET_STRINGS
1945if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1946printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n",
1947__FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length );
1948#endif
1949
1950        // replace the user pointer by a kernel pointer in the k_pointer[] array
1951        k_pointers[index] = k_buf_ptr;
1952
1953        // increment loop variables
1954        k_buf_ptr   += length;
1955        k_buf_space -= length;
1956
1957    }  // end loop on index
1958
1959    // update into exec_info structure
1960    if( is_args )
1961    {
1962        exec_info->args_pointers  =  k_pointers;
1963        exec_info->args_nr        =  index;
1964    }
1965    else
1966    {
1967        exec_info->envs_pointers  =  k_pointers;
1968        exec_info->envs_buf_free  =  k_buf_ptr;
1969        exec_info->envs_nr        =  index;
1970    }
1971
1972#if DEBUG_PROCESS_EXEC_GET_STRINGS
1973if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1974printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n",
1975__FUNCTION__, this->process->pid, this->trdid, index );
1976#endif
1977
1978    return 0;
1979
1980} // end process_exec_get_strings()
1981
1982/////////////////////////////////
1983error_t process_make_exec( void )
1984{
1985    thread_t       * this;                    // local pointer on this thread
1986    process_t      * process;                 // local pointer on this process
1987    pid_t            pid;                     // this process identifier
1988    trdid_t          trdid;                   // this thread identifier
1989    xptr_t           ref_xp;                  // reference process for this process
1990        error_t          error;                   // value returned by called functions
1991    char           * elf_path;                // path to .elf file
1992    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1993    uint32_t         file_id;                 // file index in fd_array
1994    vseg_t         * vseg;                    // local pointer on created vseg(s)
1995    uint32_t         n;                       // index for loops
1996
1997    uint32_t         args_nr;                 // actual number of args (from exec_info)
1998    intptr_t         args_base;               // args vseg base address in user space
1999    uint32_t         args_size;               // args vseg size (bytes)
2000
2001    uint32_t         envs_nr;                 // actual number of envs (from exec_info)
2002    intptr_t         envs_base;               // envs vseg base address in user space
2003    uint32_t         envs_size;               // envs vseg size (bytes)
2004
2005    // get calling thread, process, pid, trdid, and ref_xp
2006    this    = CURRENT_THREAD;
2007    process = this->process;
2008    pid     = process->pid;
2009    trdid   = this->trdid;
2010    ref_xp  = process->ref_xp;
2011
2012        // get .elf pathname from exec_info structure
2013        elf_path      = process->exec_info.path;
2014
2015#if DEBUG_PROCESS_MAKE_EXEC
2016uint32_t cycle = (uint32_t)hal_get_cycles();
2017if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2018printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
2019__FUNCTION__, pid, trdid, elf_path, cycle );
2020#endif
2021
2022    // 1. open the file identified by <path>
2023    file_xp = XPTR_NULL;
2024    file_id = 0xFFFFFFFF;
2025        error   = vfs_open( process->vfs_root_xp,
2026                            elf_path,
2027                        ref_xp,
2028                            O_RDONLY,
2029                            0,
2030                            &file_xp,
2031                            &file_id );
2032        if( error )
2033        {
2034                printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s>\n",
2035        __FUNCTION__, pid, trdid, elf_path );
2036                return -1;
2037        }
2038
2039#if (DEBUG_PROCESS_MAKE_EXEC & 1)
2040if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2041printk("\n[%s] thread[%x,%x] opened file <%s>\n",
2042__FUNCTION__, pid, trdid, elf_path );
2043#endif
2044
2045    // 2. delete all threads other than this main thread in all clusters
2046    process_sigaction( pid , DELETE_ALL_THREADS );
2047
2048#if (DEBUG_PROCESS_MAKE_EXEC & 1)
2049if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2050printk("\n[%s] thread[%x,%x] deleted existing threads\n",
2051__FUNCTION__, pid, trdid );
2052#endif
2053
2054    // 3. reset calling process VMM
2055    vmm_user_reset( process );
2056
2057#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2058if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2059{
2060    printk("\n[%s] thread[%x,%x] completed VMM reset\n",
2061    __FUNCTION__, pid, trdid );
2062    hal_vmm_display( ref_xp , true );
2063}
2064#endif
2065
2066    // 4. register the "args" vseg in VSL and map it in GPT, if required
2067    // this vseg contains both the array of pointers and the strings
2068    args_nr = process->exec_info.args_nr;
2069
2070    if( args_nr > 0 )
2071    {
2072        // get args vseg base and size in user space
2073        args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
2074        args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
2075
2076        // create and register args vseg in VMM
2077        vseg = vmm_create_vseg( process,
2078                                VSEG_TYPE_DATA,
2079                                args_base,
2080                                args_size,
2081                                0,                 // file_offset unused for DATA type
2082                                0,                 // file_size unused for DATA type
2083                                XPTR_NULL,         // mapper_xp unused for DATA type
2084                                0 );               // cxy unused for DATA type
2085        if( vseg == NULL )
2086        {
2087                 printk("\n[ERROR] in %s : thread[%x,%x] cannot get args vseg for <%s>\n",
2088             __FUNCTION__, pid, trdid, elf_path );
2089                     return -1;
2090        }
2091
2092#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2093if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2094{
2095    printk("\n[%s] thread[%x,%x] args vseg registered in new process VSL\n",
2096    __FUNCTION__, pid, trdid );
2097    hal_vmm_display( ref_xp , true );
2098}
2099#endif
2100        // map all pages for this "args" vseg
2101        uint32_t fake_attr;   // required for hal_gpt_lock_pte()
2102        ppn_t    fake_ppn;    // required for hal_gpt_lock_pte()
2103
2104        xptr_t   gpt  = XPTR( local_cxy , &process->vmm.gpt );
2105        uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
2106        vpn_t    vpn  = CONFIG_VMM_UTILS_BASE;
2107        ppn_t    ppn  = ((ppn_t)process->exec_info.args_pointers >> CONFIG_PPM_PAGE_SHIFT);
2108
2109        for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 
2110        {
2111            // lock the PTE
2112            if (hal_gpt_lock_pte( gpt , vpn , &fake_attr , &fake_ppn ) )
2113            {
2114                printk("\n[ERROR] in %s : thread[%x,%x] cannot map args vpn %x for <%s>\n",
2115                __FUNCTION__, pid, trdid, vpn, elf_path );
2116                        return -1;
2117            }
2118
2119            // map and unlock the PTE
2120            hal_gpt_set_pte( gpt , vpn + n , attr , ppn + n );
2121        }
2122
2123#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2124if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2125{
2126    printk("\n[%s] thread[%x,%x] args vseg mapped in new process GPT\n",
2127    __FUNCTION__, pid, trdid );
2128    hal_vmm_display( ref_xp , true );
2129}
2130#endif
2131
2132        // set user space pointers in array of pointers
2133        char  ** ptr    = process->exec_info.args_pointers;
2134
2135        for( n = 0 ; n < args_nr ; n++ )
2136        {
2137            ptr[n] = ptr[n] + args_base - (intptr_t)ptr;
2138        } 
2139    }
2140
2141    // 5. register the "envs" vseg in VSL and map it in GPT, if required
2142    // this vseg contains both the array of pointers and the strings
2143    envs_nr = process->exec_info.envs_nr;
2144
2145    if( envs_nr > 0 )
2146    {
2147        // get envs vseg base and size in user space from config
2148        envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
2149        envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
2150
2151        // TODO (inspired from args)
2152    }
2153
2154
2155    // 6. register code & data vsegs, and entry-point in process VMM,
2156    // register extended pointer on .elf file in process descriptor
2157        error = elf_load_process( file_xp , process );
2158    if( error )
2159        {
2160                printk("\n[ERROR] in %s : thread[%x,%x] failed to access <%s>\n",
2161        __FUNCTION__, pid, trdid, elf_path );
2162        return -1;
2163        }
2164
2165#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2166if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2167{
2168    printk("\n[%s] thread[%x,%x] registered code/data vsegs / entry %x\n",
2169    __FUNCTION__, pid, trdid, process->vmm.entry_point );
2170    hal_vmm_display( ref_xp , true );
2171}
2172#endif
2173
2174    // 7. allocate an user stack vseg for main thread
2175    vseg = vmm_create_vseg( process,
2176                            VSEG_TYPE_STACK,
2177                            LTID_FROM_TRDID( trdid ),
2178                            0,                 // length unused
2179                            0,                 // file_offset unused
2180                            0,                 // file_size unused
2181                            XPTR_NULL,         // mapper_xp unused
2182                            local_cxy );
2183    if( vseg == NULL )
2184    {
2185            printk("\n[ERROR] in %s : thread[%x,%x] cannot set u_stack vseg for <%s>\n",
2186        __FUNCTION__, pid, trdid, elf_path );
2187                return -1;
2188    }
2189
2190#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2191if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2192{
2193    printk("\n[%s] thread[%x,%x] registered stack vseg\n",
2194    __FUNCTION__, pid, trdid );
2195    hal_vmm_display( ref_xp , true );
2196}
2197#endif
2198
2199    // update user stack in thread descriptor
2200    this->user_stack_vseg = vseg;
2201
2202    // 8. update the main thread descriptor ... and jumps (one way) to user code
2203    thread_user_exec( args_nr , args_base );
2204
2205    if( error )
2206    {
2207        printk("\n[ERROR] in %s : thread[%x,%x] cannot update thread for <%s>\n",
2208        __FUNCTION__ , pid, trdid, elf_path );
2209        return -1;
2210    }
2211
2212        return 0;
2213
2214}  // end process_make_exec()
2215
2216
2217////////////////////////////////////////////////
2218void process_zero_create( process_t   * process,
2219                          boot_info_t * info )
2220{
2221    error_t error;
2222    pid_t   pid;
2223
2224#if DEBUG_PROCESS_ZERO_CREATE
2225uint32_t cycle = (uint32_t)hal_get_cycles();
2226if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2227printk("\n[%s] enter / cluster %x / cycle %d\n",
2228__FUNCTION__, local_cxy, cycle );
2229#endif
2230
2231    // get pointer on VMM
2232    vmm_t * vmm = &process->vmm;
2233
2234    // get PID from local cluster manager for this kernel process
2235    error = cluster_pid_alloc( process , &pid );
2236
2237    if( error || (LPID_FROM_PID( pid ) != 0) )
2238    {
2239        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
2240        __FUNCTION__ , local_cxy, pid );
2241        hal_core_sleep();
2242    }
2243
2244#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2245if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2246printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
2247#endif
2248
2249    // initialize PID, REF_XP, PARENT_XP, and STATE
2250    // the kernel process_zero is its own parent_process,
2251    // reference_process, and owner_process, and cannot be killed...
2252    process->pid        = pid;
2253    process->ref_xp     = XPTR( local_cxy , process );
2254    process->owner_xp   = XPTR( local_cxy , process );
2255    process->parent_xp  = XPTR( local_cxy , process );
2256    process->term_state = 0;
2257
2258    // initialize VSL as empty
2259    vmm->vsegs_nr = 0;
2260        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
2261
2262#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2263if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2264printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
2265#endif
2266
2267    // initialize GPT as empty
2268    error = hal_gpt_create( &vmm->gpt );
2269
2270    if( error ) 
2271    {
2272        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
2273        hal_core_sleep();
2274    }
2275
2276#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2277if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2278printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
2279#endif
2280
2281    // initialize VSL and GPT locks
2282    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
2283   
2284    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2285    error = hal_vmm_kernel_init( info );
2286
2287    if( error ) 
2288    {
2289        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2290        hal_core_sleep();
2291    }
2292
2293#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2294if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2295printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
2296#endif
2297
2298    // reset th_tbl[] array and associated fields
2299    uint32_t i;
2300    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
2301        {
2302        process->th_tbl[i] = NULL;
2303    }
2304    process->th_nr  = 0;
2305    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
2306
2307#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2308if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2309printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy );
2310#endif
2311
2312    // reset children list as empty
2313    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2314    process->children_nr = 0;
2315    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2316                           LOCK_PROCESS_CHILDREN );
2317
2318#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2319if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2320printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy );
2321#endif
2322
2323    // register kernel process in cluster manager local_list
2324    cluster_process_local_link( process );
2325   
2326        hal_fence();
2327
2328#if DEBUG_PROCESS_ZERO_CREATE
2329cycle = (uint32_t)hal_get_cycles();
2330if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2331printk("\n[%s] exit / cluster %x / cycle %d\n",
2332__FUNCTION__, local_cxy, cycle );
2333#endif
2334
2335}  // end process_zero_create()
2336
2337////////////////////////////////
2338void process_init_create( void )
2339{
2340    process_t      * process;       // local pointer on process descriptor
2341    pid_t            pid;           // process_init identifier
2342    thread_t       * thread;        // local pointer on main thread
2343    pthread_attr_t   attr;          // main thread attributes
2344    lid_t            lid;           // selected core local index for main thread
2345    xptr_t           file_xp;       // extended pointer on .elf file descriptor
2346    uint32_t         file_id;       // file index in fd_array
2347    error_t          error;
2348
2349#if DEBUG_PROCESS_INIT_CREATE
2350thread_t * this = CURRENT_THREAD;
2351uint32_t cycle = (uint32_t)hal_get_cycles();
2352if( DEBUG_PROCESS_INIT_CREATE < cycle )
2353printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
2354__FUNCTION__, this->process->pid, this->trdid, cycle );
2355#endif
2356
2357    // allocates memory for process descriptor from local cluster
2358        process = process_alloc(); 
2359    if( process == NULL )
2360    {
2361        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
2362        hal_core_sleep();
2363    }
2364
2365    // set the CWD and VFS_ROOT fields in process descriptor
2366    process->cwd_xp      = process_zero.vfs_root_xp;
2367    process->vfs_root_xp = process_zero.vfs_root_xp;
2368
2369    // get PID from local cluster
2370    error = cluster_pid_alloc( process , &pid );
2371    if( error ) 
2372    {
2373        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
2374        hal_core_sleep();
2375    }
2376    if( pid != 1 ) 
2377    {
2378        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
2379        hal_core_sleep();
2380    }
2381
2382    // initialize process descriptor / parent is local process_zero
2383    error = process_reference_init( process,
2384                                    pid,
2385                                    XPTR( local_cxy , &process_zero ) ); 
2386    if( error )
2387    {
2388        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
2389        hal_core_sleep();
2390    }
2391
2392#if(DEBUG_PROCESS_INIT_CREATE & 1)
2393if( DEBUG_PROCESS_INIT_CREATE < cycle )
2394printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
2395__FUNCTION__, this->process->pid, this->trdid );
2396#endif
2397
2398    // open the file identified by CONFIG_PROCESS_INIT_PATH
2399    file_xp = XPTR_NULL;
2400    file_id = -1;
2401        error   = vfs_open( process->vfs_root_xp,
2402                            CONFIG_PROCESS_INIT_PATH,
2403                        XPTR( local_cxy , process ),
2404                            O_RDONLY,
2405                            0,
2406                            &file_xp,
2407                            &file_id );
2408    if( error )
2409    {
2410        printk("\n[PANIC] in %s : cannot open file <%s>\n",
2411         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2412        hal_core_sleep();
2413    }
2414
2415#if(DEBUG_PROCESS_INIT_CREATE & 1)
2416if( DEBUG_PROCESS_INIT_CREATE < cycle )
2417printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
2418__FUNCTION__, this->process->pid, this->trdid );
2419#endif
2420
2421    // register "code" and "data" vsegs as well as entry-point
2422    // in process VMM, using information contained in the elf file.
2423        error = elf_load_process( file_xp , process );
2424
2425    if( error ) 
2426    {
2427        printk("\n[PANIC] in %s : cannot access file <%s>\n",
2428         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2429        hal_core_sleep();
2430    }
2431
2432
2433#if(DEBUG_PROCESS_INIT_CREATE & 1)
2434if( DEBUG_PROCESS_INIT_CREATE < cycle )
2435{
2436    printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
2437    __FUNCTION__, this->process->pid, this->trdid );
2438    hal_vmm_display( XPTR( local_cxy , process ) , true );
2439}
2440#endif
2441
2442    // get extended pointers on process_zero children_root, children_lock
2443    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
2444    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
2445
2446    // take lock protecting kernel process children list
2447    remote_queuelock_acquire( children_lock_xp );
2448
2449    // register process INIT in parent local process_zero
2450        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
2451        hal_atomic_add( &process_zero.children_nr , 1 );
2452
2453    // release lock protecting kernel process children list
2454    remote_queuelock_release( children_lock_xp );
2455
2456#if(DEBUG_PROCESS_INIT_CREATE & 1)
2457if( DEBUG_PROCESS_INIT_CREATE < cycle )
2458printk("\n[%s] thread[%x,%x] registered init process in parent\n",
2459__FUNCTION__, this->process->pid, this->trdid );
2460#endif
2461
2462    // select a core in local cluster to execute the main thread
2463    lid  = cluster_select_local_core( local_cxy );
2464
2465    // initialize pthread attributes for main thread
2466    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
2467    attr.cxy        = local_cxy;
2468    attr.lid        = lid;
2469
2470    // create and initialize thread descriptor
2471        error = thread_user_create( pid,
2472                                (void *)process->vmm.entry_point,
2473                                NULL,
2474                                &attr,
2475                                &thread );
2476
2477    if( error )
2478    {
2479        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
2480        hal_core_sleep();
2481    }
2482    if( thread->trdid != 0 )
2483    {
2484        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
2485        hal_core_sleep();
2486    }
2487
2488#if(DEBUG_PROCESS_INIT_CREATE & 1)
2489if( DEBUG_PROCESS_INIT_CREATE < cycle )
2490printk("\n[%s] thread[%x,%x] created main thread\n",
2491__FUNCTION__, this->process->pid, this->trdid );
2492#endif
2493
2494    // activate thread
2495        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
2496
2497    hal_fence();
2498
2499#if DEBUG_PROCESS_INIT_CREATE
2500cycle = (uint32_t)hal_get_cycles();
2501if( DEBUG_PROCESS_INIT_CREATE < cycle )
2502printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
2503__FUNCTION__, this->process->pid, this->trdid, cycle );
2504#endif
2505
2506}  // end process_init_create()
2507
2508/////////////////////////////////////////
2509void process_display( xptr_t process_xp )
2510{
2511    process_t   * process_ptr;
2512    cxy_t         process_cxy;
2513
2514    xptr_t        parent_xp;       // extended pointer on parent process
2515    process_t   * parent_ptr;
2516    cxy_t         parent_cxy;
2517
2518    xptr_t        owner_xp;        // extended pointer on owner process
2519    process_t   * owner_ptr;
2520    cxy_t         owner_cxy;
2521
2522    pid_t         pid;
2523    pid_t         ppid;
2524    lpid_t        lpid;
2525    uint32_t      state;
2526    uint32_t      th_nr;
2527
2528    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
2529    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
2530    chdev_t     * txt_chdev_ptr;
2531    cxy_t         txt_chdev_cxy;
2532    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
2533
2534    xptr_t        elf_file_xp;     // extended pointer on .elf file
2535    cxy_t         elf_file_cxy;
2536    vfs_file_t  * elf_file_ptr;
2537    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
2538
2539    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
2540    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
2541
2542    // get cluster and local pointer on process
2543    process_ptr = GET_PTR( process_xp );
2544    process_cxy = GET_CXY( process_xp );
2545
2546    // get process PID, LPID, and state
2547    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2548    lpid  = LPID_FROM_PID( pid );
2549    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
2550
2551    // get process PPID
2552    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
2553    parent_cxy = GET_CXY( parent_xp );
2554    parent_ptr = GET_PTR( parent_xp );
2555    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
2556
2557    // get number of threads
2558    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
2559
2560    // get pointers on owner process descriptor
2561    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
2562    owner_cxy = GET_CXY( owner_xp );
2563    owner_ptr = GET_PTR( owner_xp );
2564
2565    // get process TXT name and .elf name
2566    if( lpid )                                   // user process
2567    {
2568
2569        // get extended pointer on file descriptor associated to TXT_RX
2570        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
2571
2572        assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
2573        "process must be attached to one TXT terminal" ); 
2574
2575        // get TXT_RX chdev pointers
2576        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2577        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2578        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2579
2580        // get TXT_RX name and ownership
2581        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2582                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
2583   
2584        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2585                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
2586
2587        // get process .elf name
2588        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2589        elf_file_cxy  = GET_CXY( elf_file_xp );
2590        elf_file_ptr  = GET_PTR( elf_file_xp );
2591        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2592        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2593    }
2594    else                                         // kernel process_zero
2595    {
2596        // TXT name and .elf name are not registered in kernel process_zero
2597        strcpy( txt_name , "txt0_rx" );
2598        txt_owner_xp = process_xp; 
2599        strcpy( elf_name , "kernel.elf" );
2600    }
2601
2602    // display process info
2603    if( txt_owner_xp == process_xp )
2604    {
2605        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2606        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2607    }
2608    else
2609    {
2610        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2611        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2612    }
2613}  // end process_display()
2614
2615
2616////////////////////////////////////////////////////////////////////////////////////////
2617//     Terminals related functions
2618////////////////////////////////////////////////////////////////////////////////////////
2619
2620//////////////////////////////////
2621uint32_t process_txt_alloc( void )
2622{
2623    uint32_t  index;       // TXT terminal index
2624    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2625    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2626    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2627    xptr_t    root_xp;     // extended pointer on owner field in chdev
2628
2629    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2630    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2631    {
2632        // get pointers on TXT_RX[index]
2633        chdev_xp  = chdev_dir.txt_rx[index];
2634        chdev_cxy = GET_CXY( chdev_xp );
2635        chdev_ptr = GET_PTR( chdev_xp );
2636
2637        // get extended pointer on root of attached process
2638        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2639
2640        // return free TXT index if found
2641        if( xlist_is_empty( root_xp ) ) return index; 
2642    }
2643
2644    assert( __FUNCTION__, false , "no free TXT terminal found" );
2645
2646    return -1;
2647
2648} // end process_txt_alloc()
2649
2650/////////////////////////////////////////////
2651void process_txt_attach( xptr_t   process_xp,
2652                         uint32_t txt_id )
2653{
2654    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2655    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2656    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2657    xptr_t      root_xp;      // extended pointer on list root in chdev
2658    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2659
2660    process_t * process_ptr = GET_PTR(process_xp );
2661    cxy_t       process_cxy = GET_CXY(process_xp );
2662
2663// check process is in owner cluster
2664assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
2665"process descriptor not in owner cluster" );
2666
2667// check terminal index
2668assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2669"illegal TXT terminal index" );
2670
2671    // get pointers on TXT_RX[txt_id] chdev
2672    chdev_xp  = chdev_dir.txt_rx[txt_id];
2673    chdev_cxy = GET_CXY( chdev_xp );
2674    chdev_ptr = GET_PTR( chdev_xp );
2675
2676    // get extended pointer on root & lock of attached process list
2677    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2678    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2679
2680    // get lock protecting list of processes attached to TXT
2681    remote_busylock_acquire( lock_xp );
2682
2683    // insert owner process in list of attached processes to same TXT
2684    xlist_add_last( root_xp , XPTR( process_cxy , &process_ptr->txt_list ) );
2685
2686    // release lock protecting list of processes attached to TXT
2687    remote_busylock_release( lock_xp );
2688
2689#if DEBUG_PROCESS_TXT
2690thread_t * this = CURRENT_THREAD;
2691uint32_t cycle = (uint32_t)hal_get_cycles();
2692if( DEBUG_PROCESS_TXT < cycle )
2693printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2694__FUNCTION__, this->process->pid, this->trdid,
2695hal_remote_l32( XPTR( process_cxy , &process_ptr->pid, txt_id , cycle );
2696#endif
2697
2698} // end process_txt_attach()
2699
2700/////////////////////////////////////////////
2701void process_txt_detach( xptr_t  process_xp )
2702{
2703    process_t * process_ptr;  // local pointer on process in owner cluster
2704    cxy_t       process_cxy;  // process owner cluster
2705    pid_t       process_pid;  // process identifier
2706    xptr_t      file_xp;      // extended pointer on stdin file
2707    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2708    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2709    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2710    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2711
2712    // get process cluster, local pointer, and PID
2713    process_cxy = GET_CXY( process_xp );
2714    process_ptr = GET_PTR( process_xp );
2715    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2716
2717// check process descriptor in owner cluster
2718assert( __FUNCTION__, (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2719"process descriptor not in owner cluster" );
2720
2721    // release TXT ownership (does nothing if not TXT owner)
2722    process_txt_transfer_ownership( process_xp );
2723
2724    // get extended pointer on process stdin pseudo file
2725    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2726
2727    // get pointers on TXT_RX chdev
2728    chdev_xp  = chdev_from_file( file_xp );
2729    chdev_cxy = GET_CXY( chdev_xp );
2730    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2731
2732    // get extended pointer on lock protecting attached process list
2733    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2734
2735    // get lock protecting list of processes attached to TXT
2736    remote_busylock_acquire( lock_xp );
2737
2738    // unlink process from attached process list
2739    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2740
2741    // release lock protecting list of processes attached to TXT
2742    remote_busylock_release( lock_xp );
2743
2744#if DEBUG_PROCESS_TXT
2745thread_t * this = CURRENT_THREAD;
2746uint32_t cycle  = (uint32_t)hal_get_cycles();
2747uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2748if( DEBUG_PROCESS_TXT < cycle )
2749printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
2750__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
2751#endif
2752
2753} // end process_txt_detach()
2754
2755///////////////////////////////////////////////////
2756uint32_t process_txt_get_index( xptr_t process_xp )
2757{
2758
2759    // get target process cluster and local pointer
2760    process_t * process_ptr = GET_PTR( process_xp );
2761    cxy_t       process_cxy = GET_CXY( process_xp );
2762
2763assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp))),
2764"process descriptor not in owner cluster" );
2765
2766    // get extended pointer on STDIN pseudo file in owner process descriptor
2767    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0]));
2768
2769assert( __FUNCTION__, (file_xp != XPTR_NULL),
2770"STDIN pseudo-file undefined in fd_array for process %x\n",
2771hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2772
2773    // get extended pointer on TXT chdev
2774    xptr_t chdev_xp = chdev_from_file( file_xp );
2775 
2776assert( __FUNCTION__, (chdev_xp != XPTR_NULL),
2777"chdev undefined for STDIN pseudo-file of process %x\n",
2778hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2779
2780    // get cluster and local pointer on chdev
2781   cxy_t     chdev_cxy = GET_CXY( chdev_xp );
2782   chdev_t * chdev_ptr = GET_PTR( chdev_xp );
2783 
2784   // get parent TXT terminal index
2785   return hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2786
2787}  // end process_txt_get_index()
2788
2789///////////////////////////////////////////////////
2790void process_txt_set_ownership( xptr_t process_xp )
2791{
2792    process_t * process_ptr;
2793    cxy_t       process_cxy;
2794    xptr_t      file_xp;
2795    xptr_t      txt_xp;     
2796    chdev_t   * txt_ptr;
2797    cxy_t       txt_cxy;
2798
2799    // get pointers on process in owner cluster
2800    process_cxy = GET_CXY( process_xp );
2801    process_ptr = GET_PTR( process_xp );
2802
2803    // check owner cluster
2804    assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
2805    "process descriptor not in owner cluster" );
2806
2807    // get extended pointer on stdin pseudo file
2808    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2809
2810    // get pointers on TXT chdev
2811    txt_xp  = chdev_from_file( file_xp );
2812    txt_cxy = GET_CXY( txt_xp );
2813    txt_ptr = GET_PTR( txt_xp );
2814
2815    // set owner field in TXT chdev
2816    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
2817
2818#if DEBUG_PROCESS_TXT
2819thread_t * this = CURRENT_THREAD;
2820uint32_t cycle  = (uint32_t)hal_get_cycles();
2821uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2822if( DEBUG_PROCESS_TXT < cycle )
2823printk("\n[%s] thread[%x,%x] give TXT%d ownership to process / cycle %d\n",
2824__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2825#endif
2826
2827}  // end process_txt_set ownership()
2828
////////////////////////////////////////////////////////
void process_txt_transfer_ownership( xptr_t process_xp )
{
    // This function releases the TXT terminal ownership held by the process
    // identified by <process_xp> :
    // - when the target process is not the KSH (PPID != 1), ownership goes
    //   to the KSH process found in the attached-process list (must exist);
    // - when the target process is the KSH itself, ownership goes to any
    //   other attached process, or to nobody (XPTR_NULL) if none is found;
    // - it does nothing when the target process is not the current owner,
    //   or when the terminal is TXT0 (channel 0, reserved for the kernel).
    //
    // @ process_xp : extended pointer on process descriptor in owner cluster.

    process_t * process_ptr;     // local pointer on process releasing ownership
    cxy_t       process_cxy;     // process cluster
    pid_t       process_pid;     // process identifier
    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
    uint32_t    txt_id;          // TXT_RX channel
    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    xptr_t      root_xp;         // extended pointer on root of attached process list
    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;         // iterator for xlist
    xptr_t      current_xp;      // extended pointer on current process
    bool_t      found;

#if DEBUG_PROCESS_TXT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle;
#endif

    // get pointers on target process
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check owner cluster
assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
"process descriptor not in owner cluster" );

    // get extended pointer on stdin pseudo file (fd_array[0])
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get current owner and channel index from chdev descriptor
    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );

    // transfer ownership only if target process is the TXT owner,
    // and the terminal is not TXT0 (always owned by the kernel)
    if( (owner_xp == process_xp) && (txt_id > 0) ) 
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find KSH process
            // (KSH is identified by PPID == 1, i.e. child of process INIT)
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

// It must exist a KSH process for each user TXT channel
assert( __FUNCTION__, (found == true), "KSH process not found for TXT%d", txt_id );

        }
        else                                           // target process is KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find another process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( current_xp != process_xp )            // current is not KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle  = (uint32_t)hal_get_cycles();
cxy_t       current_cxy = GET_CXY( current_xp );
process_t * current_ptr = GET_PTR( current_xp );
uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // no more owner for TXT if no other process found
            if( found == false )
            {
                // set owner field in TXT chdev
                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
            }
        }
    }
    else   // target process is not the owner, or terminal is TXT0 : do nothing
    {

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
#endif

    }

}  // end process_txt_transfer_ownership()
2977
2978
2979////////////////////////////////////////////////
2980bool_t process_txt_is_owner( xptr_t process_xp )
2981{
2982    // get local pointer and cluster of process in owner cluster
2983    cxy_t       process_cxy = GET_CXY( process_xp );
2984    process_t * process_ptr = GET_PTR( process_xp );
2985
2986// check calling thread execute in target process owner cluster
2987pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2988assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
2989"process descriptor not in owner cluster" );
2990
2991    // get extended pointer on stdin pseudo file
2992    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2993
2994    // get pointers on TXT chdev
2995    xptr_t    txt_xp  = chdev_from_file( file_xp );
2996    cxy_t     txt_cxy = GET_CXY( txt_xp );
2997    chdev_t * txt_ptr = GET_PTR( txt_xp );
2998
2999    // get extended pointer on TXT_RX owner process
3000    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
3001
3002    return (process_xp == owner_xp);
3003
3004}   // end process_txt_is_owner()
3005
3006////////////////////////////////////////////////     
3007xptr_t process_txt_get_owner( uint32_t channel )
3008{
3009    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
3010    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
3011    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
3012
3013    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
3014
3015}  // end process_txt_get_owner()
3016
3017///////////////////////////////////////////
3018void process_txt_display( uint32_t txt_id )
3019{
3020    xptr_t      chdev_xp;
3021    cxy_t       chdev_cxy;
3022    chdev_t   * chdev_ptr;
3023    xptr_t      root_xp;
3024    xptr_t      lock_xp;
3025    xptr_t      current_xp;
3026    xptr_t      iter_xp;
3027    cxy_t       txt0_cxy;
3028    chdev_t   * txt0_ptr;
3029    xptr_t      txt0_xp;
3030    xptr_t      txt0_lock_xp;
3031   
3032    assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
3033    "illegal TXT terminal index" );
3034
3035    // get pointers on TXT0 chdev
3036    txt0_xp  = chdev_dir.txt_tx[0];
3037    txt0_cxy = GET_CXY( txt0_xp );
3038    txt0_ptr = GET_PTR( txt0_xp );
3039
3040    // get extended pointer on TXT0 lock
3041    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
3042
3043    // get pointers on TXT_RX[txt_id] chdev
3044    chdev_xp  = chdev_dir.txt_rx[txt_id];
3045    chdev_cxy = GET_CXY( chdev_xp );
3046    chdev_ptr = GET_PTR( chdev_xp );
3047
3048    // get extended pointer on root & lock of attached process list
3049    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
3050    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
3051
3052    // get lock on attached process list
3053    remote_busylock_acquire( lock_xp );
3054
3055    // get TXT0 lock in busy waiting mode
3056    remote_busylock_acquire( txt0_lock_xp );
3057
3058    // display header
3059    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
3060    txt_id , (uint32_t)hal_get_cycles() );
3061
3062    // scan attached process list
3063    XLIST_FOREACH( root_xp , iter_xp )
3064    {
3065        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
3066        process_display( current_xp );
3067    }
3068
3069    // release TXT0 lock in busy waiting mode
3070    remote_busylock_release( txt0_lock_xp );
3071
3072    // release lock on attached process list
3073    remote_busylock_release( lock_xp );
3074
3075}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.