source: trunk/kernel/kern/process.c @ 633

Last change on this file since 633 was 633, checked in by alain, 5 years ago

cosmetic

File size: 83.1 KB
1/*
2 * process.c - process related functions definition.
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner (2016,2017,2018,2019)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <hal_vmm.h>
32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
42#include <chdev.h>
43#include <list.h>
44#include <string.h>
45#include <scheduler.h>
46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
57#include <syscalls.h>
58#include <shared_syscalls.h>
59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
71/////////////////////////////////
72process_t * process_alloc( void )
73{
74        kmem_req_t   req;
75
76    req.type  = KMEM_PROCESS;
77        req.size  = sizeof(process_t);
78        req.flags = AF_KERNEL;
79
80    return (process_t *)kmem_alloc( &req );
81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_PROCESS;
89        req.ptr  = process;
90        kmem_free( &req );
91}
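
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): a minimal allocate / get PID / initialize /
// release-on-error sequence, roughly mirroring the pattern used by process_make_fork()
// further below. The function name and local variables are hypothetical; only APIs
// defined in this file are used.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t process_create_sketch( xptr_t parent_xp )
{
    pid_t       new_pid;
    process_t * process = process_alloc();            // allocate a process descriptor

    if( process == NULL ) return -1;

    if( cluster_pid_alloc( process , &new_pid ) )     // get a PID from the local cluster
    {
        process_free( process );                      // release the descriptor on error
        return -1;
    }

    if( process_reference_init( process , new_pid , parent_xp ) )
    {
        cluster_pid_release( new_pid );               // release the PID on error
        process_free( process );
        return -1;
    }

    return 0;                              // the reference process is now registered
}
#endif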
92
93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
97{
98    error_t     error;
99    xptr_t      process_xp;
100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
111    xptr_t      file_xp;
112    xptr_t      chdev_xp;
113    chdev_t   * chdev_ptr;
114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
116    vmm_t     * vmm;
117
118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
121    // get pointer on process vmm
122    vmm = &process->vmm;
123
124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
126    parent_ptr = GET_PTR( parent_xp );
127
128    // get parent_pid
129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
130
131#if DEBUG_PROCESS_REFERENCE_INIT
132thread_t * this = CURRENT_THREAD;
133uint32_t cycle = (uint32_t)hal_get_cycles();
134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
137#endif
138
139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
142    process->owner_xp   = XPTR( local_cxy , process );
143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
145
146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
153
154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
168    // initialize VSL lock
169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
171    // register kernel vsegs in VMM as required by the architecture
172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
181printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",
182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
194 
195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
196cycle = (uint32_t)hal_get_cycles();
197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
200#endif
201
202    // initialize fd_array as empty
203    process_fd_init( process );
204
205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
207    {
208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
211
212        // attach process to TXT
213        process_txt_attach( process , txt_id ); 
214
215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
220#endif
221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
226        error = vfs_open(  process->vfs_root_xp,
227                           rx_path,
228                           process_xp,
229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdin pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
238
239assert( (stdin_id == 0) , "stdin index must be 0" );
240
241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
246#endif
247
248        // create stdout pseudo file         
249        error = vfs_open(  process->vfs_root_xp,
250                           tx_path,
251                           process_xp,
252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
261
262assert( (stdout_id == 1) , "stdout index must be 1" );
263
264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
269#endif
270
271        // create stderr pseudo file         
272        error = vfs_open(  process->vfs_root_xp,
273                           tx_path,
274                           process_xp,
275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
284
285assert( (stderr_id == 2) , "stderr index must be 2" );
286
287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
292#endif
293
294    }
295    else                                            // normal user process
296    {
297        // get extended pointer on stdin pseudo file in parent process
298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
300
301        // get extended pointer on parent process TXT chdev
302        chdev_xp = chdev_from_file( file_xp );
303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
306        chdev_ptr = GET_PTR( chdev_xp );
307 
308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
310
311        // attach child process to parent process TXT terminal
312        process_txt_attach( process , txt_id ); 
313
314        // copy all open files from parent process fd_array to this process
315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
317    }
318
319    // initialize lock protecting CWD changes
320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
322
323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
324cycle = (uint32_t)hal_get_cycles();
325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
328#endif
329
330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
335
336    // reset semaphore / mutex / barrier / condvar list roots and lock
337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
343
344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
348
349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
352
353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
355
356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
358
359    // initialize th_tbl[] array and associated fields
360    uint32_t i;
361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
368
369        hal_fence();
370
371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
372cycle = (uint32_t)hal_get_cycles();
373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
376#endif
377
378    return 0;
379
380}  // process_reference_init()
381
382/////////////////////////////////////////////////////
383error_t process_copy_init( process_t * local_process,
384                           xptr_t      reference_process_xp )
385{
386    error_t   error;
387    vmm_t   * vmm;
388
389    // get reference process cluster and local pointer
390    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
391    process_t * ref_ptr = GET_PTR( reference_process_xp );
392
393    // get pointer on process vmm
394    vmm = &local_process->vmm;
395
396    // initialize PID, REF_XP, PARENT_XP, and STATE
397    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
398    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
399    local_process->ref_xp     = reference_process_xp;
400    local_process->owner_xp   = reference_process_xp;
401    local_process->term_state = 0;
402
403#if DEBUG_PROCESS_COPY_INIT
404thread_t * this = CURRENT_THREAD; 
405uint32_t cycle = (uint32_t)hal_get_cycles();
406if( DEBUG_PROCESS_COPY_INIT < cycle )
407printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
408__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
409#endif
410
411// check user process
412assert( (local_process->pid != 0), "LPID cannot be 0" );
413
414    // initialize VSL as empty
415    vmm->vsegs_nr = 0;
416        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
417
418    // create an empty GPT as required by the architecture
419    error = hal_gpt_create( &vmm->gpt );
420    if( error ) 
421    {
422        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
423        return -1;
424    }
425
426    // initialize VSL lock
427        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
428
429    // register kernel vsegs in VMM as required by the architecture
430    error = hal_vmm_kernel_update( local_process );
431    if( error ) 
432    {
433        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
434        return -1;
435    }
436
437    // create "args" and "envs" vsegs
438    // create "stacks" and "mmap" vsegs allocators
439    // initialize locks protecting GPT and VSL
440    error = vmm_user_init( local_process );
441    if( error ) 
442    {
443        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
444        return -1;
445    }
446 
447#if (DEBUG_PROCESS_COPY_INIT & 1)
448cycle = (uint32_t)hal_get_cycles();
449if( DEBUG_PROCESS_COPY_INIT < cycle )
450printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
451__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
452#endif
453
454    // set process file descriptors array
455        process_fd_init( local_process );
456
457    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
458    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
459    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
460    local_process->cwd_xp      = XPTR_NULL;
461
462    // reset children list root (not used in a process descriptor copy)
463    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
464    local_process->children_nr   = 0;
465    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
466                           LOCK_PROCESS_CHILDREN );
467
468    // reset children_list (not used in a process descriptor copy)
469    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
470
471    // reset semaphore / mutex / barrier / condvar list roots (not used in a copy)
472    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
473    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
474    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
475    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
476
477    // initialize th_tbl[] array and associated fields
478    uint32_t i;
479    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
480        {
481        local_process->th_tbl[i] = NULL;
482    }
483    local_process->th_nr  = 0;
484    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
485
486    // register new process descriptor in local cluster manager local_list
487    cluster_process_local_link( local_process );
488
489    // register new process descriptor in owner cluster manager copies_list
490    cluster_process_copies_link( local_process );
491
492        hal_fence();
493
494#if DEBUG_PROCESS_COPY_INIT
495cycle = (uint32_t)hal_get_cycles();
496if( DEBUG_PROCESS_COPY_INIT < cycle )
497printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
498__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
499#endif
500
501    return 0;
502
503} // end process_copy_init()
504
505///////////////////////////////////////////
506void process_destroy( process_t * process )
507{
508    xptr_t      parent_xp;
509    process_t * parent_ptr;
510    cxy_t       parent_cxy;
511    xptr_t      children_lock_xp;
512    xptr_t      children_nr_xp;
513
514    pid_t       pid = process->pid;
515
516// check no more threads
517assert( (process->th_nr == 0),
518"process %x in cluster %x contains threads", pid , local_cxy );
519
520#if DEBUG_PROCESS_DESTROY
521thread_t * this = CURRENT_THREAD;
522uint32_t cycle = (uint32_t)hal_get_cycles();
523if( DEBUG_PROCESS_DESTROY < cycle )
524printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
525__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
526#endif
527
528    // Destroy VMM
529    vmm_destroy( process );
530
531#if (DEBUG_PROCESS_DESTROY & 1)
532if( DEBUG_PROCESS_DESTROY < cycle )
533printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
534__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
535#endif
536
537    // remove process from local_list in local cluster manager
538    cluster_process_local_unlink( process );
539
540#if (DEBUG_PROCESS_DESTROY & 1)
541if( DEBUG_PROCESS_DESTROY < cycle )
542printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
543__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
544#endif
545
546    // remove process from copies_list in owner cluster manager
547    cluster_process_copies_unlink( process );
548
549#if (DEBUG_PROCESS_DESTROY & 1)
550if( DEBUG_PROCESS_DESTROY < cycle )
551printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
552__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
553#endif
554
555    // when target process cluster is the owner cluster
556    // - remove process from TXT list and transfer ownership
557    // - remove process from children_list
558    // - release PID
559    if( CXY_FROM_PID( pid ) == local_cxy )
560    {
561        process_txt_detach( XPTR( local_cxy , process ) );
562
563#if (DEBUG_PROCESS_DESTROY & 1)
564if( DEBUG_PROCESS_DESTROY < cycle )
565printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
566__FUNCTION__, this->process->pid, this->trdid, pid );
567#endif
568
569        // get pointers on parent process
570        parent_xp  = process->parent_xp;
571        parent_cxy = GET_CXY( parent_xp );
572        parent_ptr = GET_PTR( parent_xp );
573
574        // get extended pointer on children_lock in parent process
575        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
576        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
577
578        // remove process from children_list
579        remote_queuelock_acquire( children_lock_xp );
580        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
581            hal_remote_atomic_add( children_nr_xp , -1 );
582        remote_queuelock_release( children_lock_xp );
583
584#if (DEBUG_PROCESS_DESTROY & 1)
585if( DEBUG_PROCESS_DESTROY < cycle )
586printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
587__FUNCTION__, this->process->pid, this->trdid, pid );
588#endif
589
590        // release the process PID to cluster manager
591        cluster_pid_release( pid );
592
593#if (DEBUG_PROCESS_DESTROY & 1)
594if( DEBUG_PROCESS_DESTROY < cycle )
595printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
596__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
597#endif
598
599    }
600
601    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
602
603    // FIXME close all open files [AG]
604
605    // FIXME synchronize dirty files [AG]
606
607    // release memory allocated to process descriptor
608    process_free( process );
609
610#if DEBUG_PROCESS_DESTROY
611cycle = (uint32_t)hal_get_cycles();
612if( DEBUG_PROCESS_DESTROY < cycle )
613printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
614__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
615#endif
616
617}  // end process_destroy()
618
619///////////////////////////////////////////////////////////////////
620const char * process_action_str( process_sigactions_t action_type )
621{
622    switch ( action_type )
623    {
624        case BLOCK_ALL_THREADS:   return "BLOCK";
625        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
626        case DELETE_ALL_THREADS:  return "DELETE";
627        default:                  return "undefined";
628    }
629}
630
631////////////////////////////////////////
632void process_sigaction( pid_t       pid,
633                        uint32_t    type )
634{
635    cxy_t              owner_cxy;         // owner cluster identifier
636    lpid_t             lpid;              // process index in owner cluster
637    cluster_t        * cluster;           // pointer on cluster manager
638    xptr_t             root_xp;           // extended pointer on root of copies
639    xptr_t             lock_xp;           // extended pointer on lock protecting copies
640    xptr_t             iter_xp;           // iterator on copies list
641    xptr_t             process_xp;        // extended pointer on process copy
642    cxy_t              process_cxy;       // process copy cluster identifier
643    process_t        * process_ptr;       // local pointer on process copy
644    reg_t              save_sr;           // for critical section
645    thread_t         * client;            // pointer on client thread
646    xptr_t             client_xp;         // extended pointer on client thread
647    process_t        * local;             // pointer on process copy in local cluster
648    uint32_t           remote_nr;         // number of remote process copies
649    rpc_desc_t         rpc;               // shared RPC descriptor
650    uint32_t           responses;         // shared RPC responses counter
651
652    client    = CURRENT_THREAD;
653    client_xp = XPTR( local_cxy , client );
654    local     = NULL;
655    remote_nr = 0;
656
657    // check calling thread can yield
658    thread_assert_can_yield( client , __FUNCTION__ );
659
660#if DEBUG_PROCESS_SIGACTION
661uint32_t cycle = (uint32_t)hal_get_cycles();
662if( DEBUG_PROCESS_SIGACTION < cycle )
663printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
664__FUNCTION__ , client->process->pid, client->trdid,
665process_action_str( type ) , pid , cycle );
666#endif
667
668    // get pointer on local cluster manager
669    cluster = LOCAL_CLUSTER;
670
671    // get owner cluster identifier and process lpid
672    owner_cxy = CXY_FROM_PID( pid );
673    lpid      = LPID_FROM_PID( pid );
674
675    // get root of list of copies and lock from owner cluster
676    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
677    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
678
679// check action type
680assert( ((type == DELETE_ALL_THREADS ) ||
681         (type == BLOCK_ALL_THREADS )  ||
682         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
683             
684    // The client thread sends parallel RPCs to all remote clusters containing
685    // copies of the target process, waits for all responses, and then directly
686    // handles the threads in the local cluster, when required.
687    // The client thread allocates a - shared - RPC descriptor on its stack,
688    // because all parallel, non-blocking server threads use the same input
689    // arguments and share the RPC response field.
690
691    // mask IRQs
692    hal_disable_irq( &save_sr);
693
694    // client thread blocks itself
695    thread_block( client_xp , THREAD_BLOCKED_RPC );
696
697    // initialize RPC responses counter
698    responses = 0;
699
700    // initialize shared RPC descriptor
701    // can be shared, because no out arguments
702    rpc.rsp       = &responses;
703    rpc.blocking  = false;
704    rpc.index     = RPC_PROCESS_SIGACTION;
705    rpc.thread    = client;
706    rpc.lid       = client->core->lid;
707    rpc.args[0]   = pid;
708    rpc.args[1]   = type;
709
710    // take the lock protecting process copies
711    remote_queuelock_acquire( lock_xp );
712
713    // scan list of process copies
714    XLIST_FOREACH( root_xp , iter_xp )
715    {
716        // get extended pointers and cluster on process
717        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
718        process_cxy = GET_CXY( process_xp );
719        process_ptr = GET_PTR( process_xp );
720
721        if( process_cxy == local_cxy )    // process copy is local
722        { 
723            local = process_ptr;
724        }
725        else                              // process copy is remote
726        {
727            // update number of remote process copies
728            remote_nr++;
729
730            // atomically increment RPC responses counter
731            hal_atomic_add( &responses , 1 );
732
733#if DEBUG_PROCESS_SIGACTION
734if( DEBUG_PROCESS_SIGACTION < cycle )
735printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
736__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
737#endif
738            // call RPC in target cluster
739            rpc_send( process_cxy , &rpc );
740        }
741    }  // end list of copies
742
743    // release the lock protecting process copies
744    remote_queuelock_release( lock_xp );
745
746    // restore IRQs
747    hal_restore_irq( save_sr);
748
749    // - if there are remote process copies, the client thread deschedules
750    //   (it will be unblocked by the last RPC server thread).
751    // - if there are no remote copies, the client thread unblocks itself.
752    if( remote_nr )
753    {
754        sched_yield("blocked on rpc_process_sigaction");
755    } 
756    else
757    {
758        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
759    }
760
761    // handle the local process copy if required
762    if( local != NULL )
763    {
764
765#if DEBUG_PROCESS_SIGACTION
766if( DEBUG_PROCESS_SIGACTION < cycle )
767printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
768__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
769#endif
770        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
771        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
772        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
773    }
774
775#if DEBUG_PROCESS_SIGACTION
776cycle = (uint32_t)hal_get_cycles();
777if( DEBUG_PROCESS_SIGACTION < cycle )
778printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
779__FUNCTION__, client->process->pid, client->trdid,
780process_action_str( type ), pid, cycle );
781#endif
782
783}  // end process_sigaction()
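
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): typical invocations of process_sigaction(),
// as done for instance by process_make_exec() further below. The wrapper function
// name is hypothetical and purely illustrative.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void process_sigaction_usage_sketch( pid_t pid )
{
    // block all threads of the target process, in all clusters
    process_sigaction( pid , BLOCK_ALL_THREADS );

    // mark all threads of the target process for deletion, in all clusters
    process_sigaction( pid , DELETE_ALL_THREADS );

    // unblock all threads of the target process, in all clusters
    process_sigaction( pid , UNBLOCK_ALL_THREADS );
}
#endif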
784
785/////////////////////////////////////////////////
786void process_block_threads( process_t * process )
787{
788    thread_t          * target;         // pointer on target thread
789    thread_t          * this;           // pointer on calling thread
790    uint32_t            ltid;           // index in process th_tbl[]
791    uint32_t            count;          // requests counter
792    volatile uint32_t   ack_count;      // acknowledges counter
793
794    // get calling thread pointer
795    this = CURRENT_THREAD;
796
797#if DEBUG_PROCESS_SIGACTION
798pid_t pid = process->pid;
799uint32_t cycle = (uint32_t)hal_get_cycles();
800if( DEBUG_PROCESS_SIGACTION < cycle )
801printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
802__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
803#endif
804
805// check target process is a user process
806assert( (LPID_FROM_PID( process->pid ) != 0 ),
807"process %x is not a user process\n", process->pid );
808
809    // get lock protecting process th_tbl[]
810    rwlock_rd_acquire( &process->th_lock );
811
812    // loop on target process local threads
813    // we use both "ltid" and "count" because th_tbl[] can contain holes
814    // - if the calling thread and the target thread are not running on the same
815    //   core, we ask the target scheduler to acknowledge the blocking,
816    //   to be sure that the target thread is not running.
817    // - if the calling thread and the target thread are running on the same core,
818    //   we don't need confirmation from the scheduler.
819           
820    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
821    {
822        target = process->th_tbl[ltid];
823
824        if( target != NULL )                                 // thread exist
825        {
826            count++;
827
828            // set the global blocked bit in target thread descriptor.
829            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
830 
831            if( this->core->lid != target->core->lid )
832            {
833                // increment responses counter
834                hal_atomic_add( (void*)&ack_count , 1 );
835
836                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
837                thread_set_req_ack( target , (uint32_t *)&ack_count );
838
839                // force scheduling on target thread
840                dev_pic_send_ipi( local_cxy , target->core->lid );
841            }
842        }
843    }
844
845    // release lock protecting process th_tbl[]
846    rwlock_rd_release( &process->th_lock );
847
848    // wait for the schedulers' acknowledgements  TODO this could be improved...
849    while( 1 )
850    {
851        // exit when all scheduler acknowledgements have been received
852        if ( ack_count == 0 ) break;
853   
854        // wait 1000 cycles before retry
855        hal_fixed_delay( 1000 );
856    }
857
858#if DEBUG_PROCESS_SIGACTION
859cycle = (uint32_t)hal_get_cycles();
860if( DEBUG_PROCESS_SIGACTION < cycle )
861printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
862__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
863#endif
864
865}  // end process_block_threads()
866
867/////////////////////////////////////////////////
868void process_delete_threads( process_t * process,
869                             xptr_t      client_xp )
870{
871    thread_t          * target;        // local pointer on target thread
872    xptr_t              target_xp;     // extended pointer on target thread
873    cxy_t               owner_cxy;     // owner process cluster
874    uint32_t            ltid;          // index in process th_tbl
875    uint32_t            count;         // threads counter
876
877
878
879    // get target process owner cluster
880    owner_cxy = CXY_FROM_PID( process->pid );
881
882#if DEBUG_PROCESS_SIGACTION
883thread_t * this  = CURRENT_THREAD;
884uint32_t   cycle = (uint32_t)hal_get_cycles();
885if( DEBUG_PROCESS_SIGACTION < cycle )
886printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
887__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
888#endif
889
890// check target process is a user process
891assert( (LPID_FROM_PID( process->pid ) != 0),
892"process %x is not a user process\n", process->pid );
893
894    // get lock protecting process th_tbl[]
895    rwlock_wr_acquire( &process->th_lock );
896
897    // loop on target process local threads                       
898    // we use both "ltid" and "count" because th_tbl[] can contain holes
899    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
900    {
901        target = process->th_tbl[ltid];
902
903        if( target != NULL )    // valid thread 
904        {
905            count++;
906            target_xp = XPTR( local_cxy , target );
907
908            // main thread and client thread should not be deleted
909            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
910                (client_xp) != target_xp )                           // not client thread
911            {
912                // mark target thread for delete and block it
913                thread_delete( target_xp , process->pid , false );   // not forced
914            }
915        }
916    }
917
918    // release lock protecting process th_tbl[]
919    rwlock_wr_release( &process->th_lock );
920
921#if DEBUG_PROCESS_SIGACTION
922cycle = (uint32_t)hal_get_cycles();
923if( DEBUG_PROCESS_SIGACTION < cycle )
924printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
925__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
926#endif
927
928}  // end process_delete_threads()
929
930///////////////////////////////////////////////////
931void process_unblock_threads( process_t * process )
932{
933    thread_t          * target;        // pointer on target thread
934    uint32_t            ltid;          // index in process th_tbl
935    uint32_t            count;         // requests counter
936
937#if DEBUG_PROCESS_SIGACTION
938thread_t * this  = CURRENT_THREAD;
939pid_t      pid   = process->pid;
940uint32_t   cycle = (uint32_t)hal_get_cycles();
941if( DEBUG_PROCESS_SIGACTION < cycle )
942printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
943__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
944#endif
945
946// check target process is a user process
947assert( ( LPID_FROM_PID( process->pid ) != 0 ),
948"process %x is not a user process\n", process->pid );
949
950    // get lock protecting process th_tbl[]
951    rwlock_rd_acquire( &process->th_lock );
952
953    // loop on process threads to unblock all threads
954    // we use both "ltid" and "count" because th_tbl[] can contain holes
955    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
956    {
957        target = process->th_tbl[ltid];
958
959        if( target != NULL )             // thread found
960        {
961            count++;
962
963            // reset the global blocked bit in target thread descriptor.
964            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
965        }
966    }
967
968    // release lock protecting process th_tbl[]
969    rwlock_rd_release( &process->th_lock );
970
971#if DEBUG_PROCESS_SIGACTION
972cycle = (uint32_t)hal_get_cycles();
973if( DEBUG_PROCESS_SIGACTION < cycle )
974printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
975__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
976#endif
977
978}  // end process_unblock_threads()
979
980///////////////////////////////////////////////
981process_t * process_get_local_copy( pid_t pid )
982{
983    error_t        error;
984    process_t    * process_ptr;   // local pointer on process
985    xptr_t         process_xp;    // extended pointer on process
986
987    cluster_t * cluster = LOCAL_CLUSTER;
988
989#if DEBUG_PROCESS_GET_LOCAL_COPY
990thread_t * this = CURRENT_THREAD;
991uint32_t cycle = (uint32_t)hal_get_cycles();
992if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
993printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
994__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
995#endif
996
997    // get lock protecting local list of processes
998    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
999
1000    // scan the local list of process descriptors to find the process
1001    xptr_t  iter;
1002    bool_t  found = false;
1003    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
1004    {
1005        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1006        process_ptr = GET_PTR( process_xp );
1007        if( process_ptr->pid == pid )
1008        {
1009            found = true;
1010            break;
1011        }
1012    }
1013
1014    // release lock protecting local list of processes
1015    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1016
1017    // allocate memory for a new local process descriptor
1018    // and initialise it from reference cluster if not found
1019    if( !found )
1020    {
1021        // get extended pointer on reference process descriptor
1022        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
1023
1024        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
1025
1026        // allocate memory for local process descriptor
1027        process_ptr = process_alloc();
1028
1029        if( process_ptr == NULL )  return NULL;
1030
1031        // initialize local process descriptor copy
1032        error = process_copy_init( process_ptr , ref_xp );
1033
1034        if( error ) return NULL;
1035    }
1036
1037#if DEBUG_PROCESS_GET_LOCAL_COPY
1038cycle = (uint32_t)hal_get_cycles();
1039if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1040printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
1041__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
1042#endif
1043
1044    return process_ptr;
1045
1046}  // end process_get_local_copy()
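
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): a hypothetical caller that needs a process
// descriptor copy in the local cluster before accessing its local fields. Only APIs
// and fields defined in this file are used.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static uint32_t process_local_threads_sketch( pid_t pid )
{
    // get (or create and initialize) the local copy of the process descriptor
    process_t * process = process_get_local_copy( pid );

    if( process == NULL ) return 0;

    // the local pointer can then be used to access per-cluster fields,
    // such as the number of threads registered in this cluster
    return process->th_nr;
}
#endif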
1047
1048////////////////////////////////////////////
1049pid_t process_get_ppid( xptr_t  process_xp )
1050{
1051    cxy_t       process_cxy;
1052    process_t * process_ptr;
1053    xptr_t      parent_xp;
1054    cxy_t       parent_cxy;
1055    process_t * parent_ptr;
1056
1057    // get process cluster and local pointer
1058    process_cxy = GET_CXY( process_xp );
1059    process_ptr = GET_PTR( process_xp );
1060
1061    // get pointers on parent process
1062    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1063    parent_cxy = GET_CXY( parent_xp );
1064    parent_ptr = GET_PTR( parent_xp );
1065
1066    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1067}
1068
1069//////////////////////////////////////////////////////////////////////////////////////////
1070// File descriptor array related functions
1071//////////////////////////////////////////////////////////////////////////////////////////
1072
1073///////////////////////////////////////////
1074void process_fd_init( process_t * process )
1075{
1076    uint32_t fd;
1077
1078    // initialize lock
1079    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
1080
1081    // initialize number of open files
1082    process->fd_array.current = 0;
1083
1084    // initialize array
1085    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1086    {
1087        process->fd_array.array[fd] = XPTR_NULL;
1088    }
1089}
1090////////////////////////////////////////////////////
1091error_t process_fd_register( xptr_t      process_xp,
1092                             xptr_t      file_xp,
1093                             uint32_t  * fdid )
1094{
1095    bool_t    found;
1096    uint32_t  id;
1097    xptr_t    xp;
1098
1099    // get reference process cluster and local pointer
1100    process_t * process_ptr = GET_PTR( process_xp );
1101    cxy_t       process_cxy = GET_CXY( process_xp );
1102
1103// check client process is reference process
1104assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
1105"client process must be reference process\n" );
1106
1107#if DEBUG_PROCESS_FD_REGISTER
1108thread_t * this  = CURRENT_THREAD;
1109uint32_t   cycle = (uint32_t)hal_get_cycles();
1110pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1111if( DEBUG_PROCESS_FD_REGISTER < cycle )
1112printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1113__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1114#endif
1115
1116    // build extended pointer on lock protecting reference fd_array
1117    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1118
1119    // take lock protecting reference fd_array
1120        remote_queuelock_acquire( lock_xp );
1121
1122    found   = false;
1123
1124    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
1125    {
1126        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1127        if ( xp == XPTR_NULL )
1128        {
1129            // update reference fd_array
1130            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1131                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
1132
1133            // exit
1134                        *fdid = id;
1135            found = true;
1136            break;
1137        }
1138    }
1139
1140    // release lock protecting fd_array
1141        remote_queuelock_release( lock_xp );
1142
1143#if DEBUG_PROCESS_FD_REGISTER
1144cycle = (uint32_t)hal_get_cycles();
1145if( DEBUG_PROCESS_FD_REGISTER < cycle )
1146printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1147__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1148#endif
1149
1150    if ( !found ) return -1;
1151    else          return 0;
1152
1153}  // end process_fd_register()
1154
1155////////////////////////////////////////////////
1156xptr_t process_fd_get_xptr( process_t * process,
1157                            uint32_t    fdid )
1158{
1159    xptr_t  file_xp;
1160    xptr_t  lock_xp;
1161
1162    // access local copy of process descriptor
1163    file_xp = process->fd_array.array[fdid];
1164
1165    if( file_xp == XPTR_NULL )
1166    {
1167        // get reference process cluster and local pointer
1168        xptr_t      ref_xp  = process->ref_xp;
1169        cxy_t       ref_cxy = GET_CXY( ref_xp );
1170        process_t * ref_ptr = GET_PTR( ref_xp );
1171
1172        // build extended pointer on lock protecting reference fd_array
1173        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1174
1175        // take lock protecting reference fd_array
1176            remote_queuelock_acquire( lock_xp );
1177
1178        // access reference process descriptor
1179        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
1180
1181        // update local fd_array if found
1182        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1183       
1184        // release lock protecting reference fd_array
1185            remote_queuelock_release( lock_xp );
1186    }
1187
1188    return file_xp;
1189
1190}  // end process_fd_get_xptr()
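
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): registering an open file in the reference process
// fd_array, then translating the returned fdid back to the extended pointer from a
// (possibly non-reference) process copy. It assumes the caller already holds <file_xp>,
// an extended pointer on an open file descriptor; the function name is hypothetical.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t process_fd_usage_sketch( process_t * process,
                                        xptr_t      file_xp )
{
    uint32_t fdid;

    // register the open file in the reference process fd_array
    if( process_fd_register( process->ref_xp , file_xp , &fdid ) ) return -1;

    // any process copy can later translate the fdid back to the file pointer,
    // updating its local fd_array cache when the entry is found in the reference
    xptr_t found_xp = process_fd_get_xptr( process , fdid );

    return ( found_xp == file_xp ) ? 0 : -1;
}
#endif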
1191
1192///////////////////////////////////////////
1193void process_fd_remote_copy( xptr_t dst_xp,
1194                             xptr_t src_xp )
1195{
1196    uint32_t fd;
1197    xptr_t   entry;
1198
1199    // get cluster and local pointer for src fd_array
1200    cxy_t        src_cxy = GET_CXY( src_xp );
1201    fd_array_t * src_ptr = GET_PTR( src_xp );
1202
1203    // get cluster and local pointer for dst fd_array
1204    cxy_t        dst_cxy = GET_CXY( dst_xp );
1205    fd_array_t * dst_ptr = GET_PTR( dst_xp );
1206
1207    // get the remote lock protecting the src fd_array
1208        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
1209
1210    // loop on all fd_array entries
1211    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1212        {
1213                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
1214
1215                if( entry != XPTR_NULL )
1216                {
1217            // increment file descriptor refcount
1218            vfs_file_count_up( entry );
1219
1220                        // copy entry in destination process fd_array
1221                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
1222                }
1223        }
1224
1225    // release lock on source process fd_array
1226        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
1227
1228}  // end process_fd_remote_copy()
1229
1230
1231////////////////////////////////////
1232bool_t process_fd_array_full( void )
1233{
1234    // get extended pointer on reference process
1235    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1236
1237    // get reference process cluster and local pointer
1238    process_t * ref_ptr = GET_PTR( ref_xp );
1239    cxy_t       ref_cxy = GET_CXY( ref_xp );
1240
1241    // get number of open file descriptors from reference fd_array
1242    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1243
1244        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1245}
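
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): checking fd_array fullness before trying to
// register a new open file for the calling process. Function name is hypothetical.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t process_fd_try_register_sketch( xptr_t     file_xp,
                                               uint32_t * fdid )
{
    process_t * process = CURRENT_THREAD->process;

    // refuse early when the reference fd_array is already full
    if( process_fd_array_full() ) return -1;

    // otherwise register the file in the reference process fd_array
    return process_fd_register( process->ref_xp , file_xp , fdid );
}
#endif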
1246
1247
1248////////////////////////////////////////////////////////////////////////////////////
1249//  Thread related functions
1250////////////////////////////////////////////////////////////////////////////////////
1251
1252/////////////////////////////////////////////////////
1253error_t process_register_thread( process_t * process,
1254                                 thread_t  * thread,
1255                                 trdid_t   * trdid )
1256{
1257    ltid_t         ltid;
1258    bool_t         found = false;
1259 
1260// check arguments
1261assert( (process != NULL) , "process argument is NULL" );
1262assert( (thread != NULL) , "thread argument is NULL" );
1263
1264    // get the lock protecting th_tbl for all threads
1265    // but the idle thread executing kernel_init (cannot yield)
1266    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1267
1268    // scan th_tbl
1269    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1270    {
1271        if( process->th_tbl[ltid] == NULL )
1272        {
1273            found = true;
1274            break;
1275        }
1276    }
1277
1278    if( found )
1279    {
1280        // register thread in th_tbl[]
1281        process->th_tbl[ltid] = thread;
1282        process->th_nr++;
1283
1284        // returns trdid
1285        *trdid = TRDID( local_cxy , ltid );
1286    }
1287
1288    // release the lock protecting th_tbl
1289    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1290
1291    return (found) ? 0 : 0xFFFFFFFF;
1292
1293}  // end process_register_thread()
1294
1295///////////////////////////////////////////////////
1296uint32_t process_remove_thread( thread_t * thread )
1297{
1298    uint32_t count;  // number of threads in local process descriptor
1299
1300// check thread
1301assert( (thread != NULL) , "thread argument is NULL" );
1302
1303    process_t * process = thread->process;
1304
1305    // get thread local index
1306    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1307   
1308    // get the lock protecting th_tbl[]
1309    rwlock_wr_acquire( &process->th_lock );
1310
1311    // get number of threads
1312    count = process->th_nr;
1313
1314// check th_nr value
1315assert( (count > 0) , "process th_nr cannot be 0" );
1316
1317    // remove thread from th_tbl[]
1318    process->th_tbl[ltid] = NULL;
1319    process->th_nr = count-1;
1320
1321    // release lock protecting th_tbl
1322    rwlock_wr_release( &process->th_lock );
1323
1324    return count;
1325
1326}  // end process_remove_thread()
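
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (disabled code): pairing process_register_thread() with
// process_remove_thread(). The thread descriptor is assumed to be already built by the
// thread creation code; error handling is reduced to the minimum and names are
// hypothetical.
//////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t process_thread_registration_sketch( process_t * process,
                                                   thread_t  * thread )
{
    trdid_t trdid;

    // get a free slot in th_tbl[] and build the thread identifier
    if( process_register_thread( process , thread , &trdid ) ) return -1;

    // the trdid encodes both the local cluster and the th_tbl[] index
    thread->trdid = trdid;

    // ... the thread executes ...

    // remove the thread from th_tbl[]; the returned value is the number
    // of threads registered in this cluster before the removal
    uint32_t count = process_remove_thread( thread );

    return ( count > 0 ) ? 0 : -1;
}
#endif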
1327
1328/////////////////////////////////////////////////////////
1329error_t process_make_fork( xptr_t      parent_process_xp,
1330                           xptr_t      parent_thread_xp,
1331                           pid_t     * child_pid,
1332                           thread_t ** child_thread )
1333{
1334    process_t * process;         // local pointer on child process descriptor
1335    thread_t  * thread;          // local pointer on child thread descriptor
1336    pid_t       new_pid;         // process identifier for child process
1337    pid_t       parent_pid;      // process identifier for parent process
1338    xptr_t      ref_xp;          // extended pointer on reference process
1339    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
1340    error_t     error;
1341
1342    // get cluster and local pointer for parent process
1343    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
1344    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
1345
1346    // get parent process PID and extended pointer on .elf file
1347    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1348    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1349
1350    // get extended pointer on reference process
1351    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
1352
1353// check parent process is the reference process
1354assert( (parent_process_xp == ref_xp ) ,
1355"parent process must be the reference process" );
1356
1357#if DEBUG_PROCESS_MAKE_FORK
1358uint32_t cycle   = (uint32_t)hal_get_cycles();
1359thread_t * this  = CURRENT_THREAD;
1360trdid_t    trdid = this->trdid;
1361pid_t      pid   = this->process->pid;
1362if( DEBUG_PROCESS_MAKE_FORK < cycle )
1363printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
1364__FUNCTION__, pid, trdid, local_cxy, cycle );
1365#endif
1366
1367    // allocate a process descriptor
1368    process = process_alloc();
1369    if( process == NULL )
1370    {
1371        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1372        __FUNCTION__, local_cxy ); 
1373        return -1;
1374    }
1375
1376    // allocate a child PID from local cluster
1377    error = cluster_pid_alloc( process , &new_pid );
1378    if( error ) 
1379    {
1380        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1381        __FUNCTION__, local_cxy ); 
1382        process_free( process );
1383        return -1;
1384    }
1385
1386#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1387cycle = (uint32_t)hal_get_cycles();
1388if( DEBUG_PROCESS_MAKE_FORK < cycle )
1389printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
1390__FUNCTION__, pid, trdid, new_pid, cycle );
1391#endif
1392
1393    // initializes child process descriptor from parent process descriptor
1394    error = process_reference_init( process,
1395                                    new_pid,
1396                                    parent_process_xp );
1397    if( error ) 
1398    {
1399        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1400        __FUNCTION__, local_cxy ); 
1401        process_free( process );
1402        return -1;
1403    }
1404
1405#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1406cycle = (uint32_t)hal_get_cycles();
1407if( DEBUG_PROCESS_MAKE_FORK < cycle )
1408printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
1409__FUNCTION__, pid, trdid, new_pid, cycle );
1410#endif
1411
1412    // copy VMM from parent descriptor to child descriptor
1413    error = vmm_fork_copy( process,
1414                           parent_process_xp );
1415    if( error )
1416    {
1417        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1418        __FUNCTION__, local_cxy ); 
1419        process_free( process );
1420        cluster_pid_release( new_pid );
1421        return -1;
1422    }
1423
1424#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1425cycle = (uint32_t)hal_get_cycles();
1426if( DEBUG_PROCESS_MAKE_FORK < cycle )
1427printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
1428__FUNCTION__, pid, trdid, cycle );
1429#endif
1430
1431    // if parent_process is INIT, or if parent_process is the TXT owner,
1432    // the child_process becomes the owner of its TXT terminal
1433    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
1434    {
1435        process_txt_set_ownership( XPTR( local_cxy , process ) );
1436
1437#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1438cycle = (uint32_t)hal_get_cycles();
1439if( DEBUG_PROCESS_MAKE_FORK < cycle )
1440printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
1441__FUNCTION__ , pid, trdid, cycle );
1442#endif
1443
1444    }
1445
1446    // update extended pointer on .elf file
1447    process->vfs_bin_xp = vfs_bin_xp;
1448
1449    // create child thread descriptor from parent thread descriptor
1450    error = thread_user_fork( parent_thread_xp,
1451                              process,
1452                              &thread );
1453    if( error )
1454    {
1455        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1456        __FUNCTION__, local_cxy ); 
1457        process_free( process );
1458        cluster_pid_release( new_pid );
1459        return -1;
1460    }
1461
1462// check main thread LTID
1463assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1464"main thread must have LTID == 0" );
1465
1466#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1467cycle = (uint32_t)hal_get_cycles();
1468if( DEBUG_PROCESS_MAKE_FORK < cycle )
1469printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
1470__FUNCTION__, pid, trdid, thread, cycle );
1471#endif
1472
1473    // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
1474    // this includes all parent process copies in all clusters
1475    if( parent_process_cxy == local_cxy )   // reference is local
1476    {
1477        vmm_set_cow( parent_process_ptr );
1478    }
1479    else                                    // reference is remote
1480    {
1481        rpc_vmm_set_cow_client( parent_process_cxy,
1482                                parent_process_ptr );
1483    }
1484
1485    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
1486    vmm_set_cow( process );
1487 
1488#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1489cycle = (uint32_t)hal_get_cycles();
1490if( DEBUG_PROCESS_MAKE_FORK < cycle )
1491printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
1492__FUNCTION__, pid, trdid, cycle );
1493#endif
1494
1495    // get extended pointers on parent children_root, children_lock and children_nr
1496    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1497    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1498    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
1499
1500    // register process in parent children list
1501    remote_queuelock_acquire( children_lock_xp );
1502        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1503        hal_remote_atomic_add( children_nr_xp , 1 );
1504    remote_queuelock_release( children_lock_xp );
1505
1506    // return success
1507    *child_thread = thread;
1508    *child_pid    = new_pid;
1509
1510#if DEBUG_PROCESS_MAKE_FORK
1511cycle = (uint32_t)hal_get_cycles();
1512if( DEBUG_PROCESS_MAKE_FORK < cycle )
1513printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
1514__FUNCTION__, pid, trdid, new_pid, cycle );
1515#endif
1516
1517    return 0;
1518
1519}   // end process_make_fork()
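
// The following usage sketch is illustrative only and not part of the kernel source.
// It shows how a fork() syscall handler could invoke the process_make_fork() function
// defined above; the sys_fork() context, the argument order, and the parent_process_xp
// and parent_thread_xp names are assumptions made for this sketch.
//
//     pid_t      child_pid;        // PID of the new child process
//     thread_t * child_thread;     // main thread of the new child process
//
//     error = process_make_fork( parent_process_xp,   // reference parent process
//                                parent_thread_xp,    // calling parent thread
//                                &child_pid,          // [out] child PID
//                                &child_thread );     // [out] child main thread
//
//     if( error ) return -1;       // nothing to release by the caller :
//                                  // process_make_fork() cleans up on failure
//
//     // on success, the parent returns <child_pid> to user space, and the child
//     // main thread, once unblocked, presumably returns 0, following the usual
//     // fork() semantics.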
1520
1521/////////////////////////////////////////////////////
1522error_t process_make_exec( exec_info_t  * exec_info )
1523{
1524    thread_t       * thread;                  // local pointer on this thread
1525    process_t      * process;                 // local pointer on this process
1526    pid_t            pid;                     // this process identifier
1527    xptr_t           ref_xp;                  // reference process for this process
1528        error_t          error;                   // value returned by called functions
1529    char           * path;                    // path to .elf file
1530    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1531    uint32_t         file_id;                 // file index in fd_array
1532    uint32_t         args_nr;                 // number of main thread arguments
1533    char          ** args_pointers;           // array of pointers on main thread arguments
1534
1535    // get calling thread, process, pid and ref_xp
1536    thread  = CURRENT_THREAD;
1537    process = thread->process;
1538    pid     = process->pid;
1539    ref_xp  = process->ref_xp;
1540
1541        // get relevant infos from exec_info
1542        path          = exec_info->path;
1543    args_nr       = exec_info->args_nr;
1544    args_pointers = exec_info->args_pointers;
1545
1546#if DEBUG_PROCESS_MAKE_EXEC
1547uint32_t cycle = (uint32_t)hal_get_cycles();
1548if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1549printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
1550__FUNCTION__, pid, thread->trdid, path, cycle );
1551#endif
1552
1553    // open the file identified by <path>
1554    file_xp = XPTR_NULL;
1555    file_id = 0xFFFFFFFF;
1556        error   = vfs_open( process->vfs_root_xp,
1557                            path,
1558                        ref_xp,
1559                            O_RDONLY,
1560                            0,
1561                            &file_xp,
1562                            &file_id );
1563        if( error )
1564        {
1565                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1566                return -1;
1567        }
1568
1569#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1570cycle = (uint32_t)hal_get_cycles();
1571if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1572printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
1573__FUNCTION__, pid, thread->trdid, path, cycle );
1574#endif
1575
1576    // delete all threads other than this main thread in all clusters
1577    process_sigaction( pid , DELETE_ALL_THREADS );
1578
1579#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1580cycle = (uint32_t)hal_get_cycles();
1581if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1582printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
1583__FUNCTION__, pid, thread->trdid, cycle );
1584#endif
1585
1586    // reset calling process VMM
1587    vmm_user_reset( process );
1588
1589#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1590cycle = (uint32_t)hal_get_cycles();
1591if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1592printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
1593__FUNCTION__, pid, thread->trdid, cycle );
1594#endif
1595
1596    // re-initialize the VMM (args/envs vsegs registration)
1597    error = vmm_user_init( process );
1598    if( error )
1599    {
1600        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1601        vfs_close( file_xp , file_id );
1602        // FIXME restore old process VMM [AG]
1603        return -1;
1604    }
1605   
1606#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1607cycle = (uint32_t)hal_get_cycles();
1608if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1609printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
1610__FUNCTION__, pid, thread->trdid, cycle );
1611#endif
1612
1613    // register code & data vsegs as well as entry-point in process VMM,
1614    // and register extended pointer on .elf file in process descriptor
1615        error = elf_load_process( file_xp , process );
1616    if( error )
1617        {
1618                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
1619        vfs_close( file_xp , file_id );
1620        // FIXME restore old process VMM [AG]
1621        return -1;
1622        }
1623
1624#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1625cycle = (uint32_t)hal_get_cycles();
1626if( DEBUG_PROCESS_MAKE_EXEC < cycle )
1627printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
1628__FUNCTION__, pid, thread->trdid, cycle );
1629#endif
1630
1631    // update the existing main thread descriptor... and jump to user code
1632    error = thread_user_exec( (void *)process->vmm.entry_point,
1633                              args_nr,
1634                              args_pointers );
1635    if( error )
1636    {
1637        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
1638        vfs_close( file_xp , file_id );
1639        // FIXME restore old process VMM
1640        return -1;
1641    }
1642
1643    assert( false, "we should not execute this code");
1644 
1645        return 0;
1646
1647}  // end process_make_exec()
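
// The following usage sketch is illustrative only. It shows how an exec() syscall
// handler could fill the exec_info_t structure and call the process_make_exec()
// function defined above. Only the path, args_nr and args_pointers fields are used
// by this function; the sys_exec() context and the kernel_path / argc / argv_copy
// names are assumptions made for this sketch.
//
//     exec_info_t info;
//
//     info.path          = kernel_path;    // kernel copy of the .elf pathname
//     info.args_nr       = argc;           // number of main thread arguments
//     info.args_pointers = argv_copy;      // kernel copies of the argument strings
//
//     error = process_make_exec( &info );
//
//     // process_make_exec() only returns on failure : on success the calling
//     // thread jumps to the new user code in thread_user_exec().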
1648
1649
1650////////////////////////////////////////////////
1651void process_zero_create( process_t   * process,
1652                          boot_info_t * info )
1653{
1654    error_t error;
1655    pid_t   pid;
1656
1657#if DEBUG_PROCESS_ZERO_CREATE
1658uint32_t cycle = (uint32_t)hal_get_cycles();
1659if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1660printk("\n[%s] enter / cluster %x / cycle %d\n",
1661__FUNCTION__, local_cxy, cycle );
1662#endif
1663
1664    // get pointer on VMM
1665    vmm_t * vmm = &process->vmm;
1666
1667    // get PID from local cluster manager for this kernel process
1668    error = cluster_pid_alloc( process , &pid );
1669
1670    if( error || (LPID_FROM_PID( pid ) != 0) )
1671    {
1672        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1673        __FUNCTION__ , local_cxy, pid );
1674        hal_core_sleep();
1675    }
1676
1677    // initialize PID, REF_XP, PARENT_XP, and STATE
1678    // the kernel process_zero is its own parent_process,
1679    // reference_process, and owner_process, and cannot be killed...
1680    process->pid        = pid;
1681    process->ref_xp     = XPTR( local_cxy , process );
1682    process->owner_xp   = XPTR( local_cxy , process );
1683    process->parent_xp  = XPTR( local_cxy , process );
1684    process->term_state = 0;
1685
1686    // initialise VSL as empty
1687    vmm->vsegs_nr = 0;
1688        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
1689
1690    // initialise GPT as empty
1691    error = hal_gpt_create( &vmm->gpt );
1692    if( error ) 
1693    {
1694        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1695        hal_core_sleep();
1696    }
1697
1698    // initialize VSL lock
1699    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
1700   
1701    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
1702    error = hal_vmm_kernel_init( info );
1703    if( error ) 
1704    {
1705        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
1706        hal_core_sleep();
1707    }
1708
1709    // reset th_tbl[] array and associated fields
1710    uint32_t i;
1711    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
1712        {
1713        process->th_tbl[i] = NULL;
1714    }
1715    process->th_nr  = 0;
1716    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
1717
1718
1719    // reset children list as empty
1720    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1721    process->children_nr = 0;
1722    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1723                           LOCK_PROCESS_CHILDREN );
1724
1725    // register kernel process in cluster manager local_list
1726    cluster_process_local_link( process );
1727   
1728        hal_fence();
1729
1730#if DEBUG_PROCESS_ZERO_CREATE
1731cycle = (uint32_t)hal_get_cycles();
1732if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1733printk("\n[%s] exit / cluster %x / cycle %d\n",
1734__FUNCTION__, local_cxy, cycle );
1735#endif
1736
1737}  // end process_zero_create()
1738
1739////////////////////////////////
1740void process_init_create( void )
1741{
1742    process_t      * process;       // local pointer on process descriptor
1743    pid_t            pid;           // process_init identifier
1744    thread_t       * thread;        // local pointer on main thread
1745    pthread_attr_t   attr;          // main thread attributes
1746    lid_t            lid;           // selected core local index for main thread
1747    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1748    uint32_t         file_id;       // file index in fd_array
1749    error_t          error;
1750
1751#if DEBUG_PROCESS_INIT_CREATE
1752thread_t * this = CURRENT_THREAD;
1753uint32_t cycle = (uint32_t)hal_get_cycles();
1754if( DEBUG_PROCESS_INIT_CREATE < cycle )
1755printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1756__FUNCTION__, this->process->pid, this->trdid, cycle );
1757#endif
1758
1759    // allocate memory for process descriptor from local cluster
1760        process = process_alloc(); 
1761    if( process == NULL )
1762    {
1763        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
1764        hal_core_sleep();
1765    }
1766
1767    // set the CWD and VFS_ROOT fields in process descriptor
1768    process->cwd_xp      = process_zero.vfs_root_xp;
1769    process->vfs_root_xp = process_zero.vfs_root_xp;
1770
1771    // get PID from local cluster
1772    error = cluster_pid_alloc( process , &pid );
1773    if( error ) 
1774    {
1775        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
1776        hal_core_sleep();
1777    }
1778    if( pid != 1 ) 
1779    {
1780        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
1781        hal_core_sleep();
1782    }
1783
1784    // initialize process descriptor / parent is local process_zero
1785    error = process_reference_init( process,
1786                                    pid,
1787                                    XPTR( local_cxy , &process_zero ) ); 
1788    if( error )
1789    {
1790        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
1791        hal_core_sleep();
1792    }
1793
1794#if(DEBUG_PROCESS_INIT_CREATE & 1)
1795if( DEBUG_PROCESS_INIT_CREATE < cycle )
1796printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1797__FUNCTION__, this->process->pid, this->trdid );
1798#endif
1799
1800    // open the file identified by CONFIG_PROCESS_INIT_PATH
1801    file_xp = XPTR_NULL;
1802    file_id = -1;
1803        error   = vfs_open( process->vfs_root_xp,
1804                            CONFIG_PROCESS_INIT_PATH,
1805                        XPTR( local_cxy , process ),
1806                            O_RDONLY,
1807                            0,
1808                            &file_xp,
1809                            &file_id );
1810    if( error )
1811    {
1812        printk("\n[PANIC] in %s : cannot open file <%s>\n",
1813         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1814        hal_core_sleep();
1815    }
1816
1817#if(DEBUG_PROCESS_INIT_CREATE & 1)
1818if( DEBUG_PROCESS_INIT_CREATE < cycle )
1819printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
1820__FUNCTION__, this->process->pid, this->trdid );
1821#endif
1822
1823    // register "code" and "data" vsegs as well as entry-point
1824    // in process VMM, using information contained in the elf file.
1825        error = elf_load_process( file_xp , process );
1826
1827    if( error ) 
1828    {
1829        printk("\n[PANIC] in %s : cannot access file <%s>\n",
1830         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1831        hal_core_sleep();
1832    }
1833
1834
1835#if(DEBUG_PROCESS_INIT_CREATE & 1)
1836if( DEBUG_PROCESS_INIT_CREATE < cycle )
1837printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1838__FUNCTION__, this->process->pid, this->trdid );
1839#endif
1840
1841#if (DEBUG_PROCESS_INIT_CREATE & 1)
1842hal_vmm_display( process , true );
1843#endif
1844
1845    // get extended pointers on process_zero children_root, children_lock
1846    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1847    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1848
1849    // take lock protecting kernel process children list
1850    remote_queuelock_acquire( children_lock_xp );
1851
1852    // register process INIT in parent local process_zero
1853        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1854        hal_atomic_add( &process_zero.children_nr , 1 );
1855
1856    // release lock protecting kernel process children list
1857    remote_queuelock_release( children_lock_xp );
1858
1859#if(DEBUG_PROCESS_INIT_CREATE & 1)
1860if( DEBUG_PROCESS_INIT_CREATE < cycle )
1861printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1862__FUNCTION__, this->process->pid, this->trdid );
1863#endif
1864
1865    // select a core in local cluster to execute the main thread
1866    lid  = cluster_select_local_core();
1867
1868    // initialize pthread attributes for main thread
1869    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1870    attr.cxy        = local_cxy;
1871    attr.lid        = lid;
1872
1873    // create and initialize thread descriptor
1874        error = thread_user_create( pid,
1875                                (void *)process->vmm.entry_point,
1876                                NULL,
1877                                &attr,
1878                                &thread );
1879
1880    if( error )
1881    {
1882        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
1883        hal_core_sleep();
1884    }
1885    if( thread->trdid != 0 )
1886    {
1887        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
1888        hal_core_sleep();
1889    }
1890
1891#if(DEBUG_PROCESS_INIT_CREATE & 1)
1892if( DEBUG_PROCESS_INIT_CREATE < cycle )
1893printk("\n[%s] thread[%x,%x] created main thread\n",
1894__FUNCTION__, this->process->pid, this->trdid );
1895#endif
1896
1897    // activate thread
1898        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1899
1900    hal_fence();
1901
1902#if DEBUG_PROCESS_INIT_CREATE
1903cycle = (uint32_t)hal_get_cycles();
1904if( DEBUG_PROCESS_INIT_CREATE < cycle )
1905printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1906__FUNCTION__, this->process->pid, this->trdid, cycle );
1907#endif
1908
1909}  // end process_init_create()
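
// Boot sequence sketch (illustrative only) : the two functions above are presumably
// called from the kernel initialisation code, process_zero_create() in every cluster
// to build the local kernel process, and process_init_create() only once to create
// the user "init" process (PID 1), child of the local process_zero. The kernel_init()
// context is an assumption made for this sketch.
//
//     process_zero_create( &process_zero , info );   // in all clusters
//
//     process_init_create();                         // in one cluster only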
1910
1911/////////////////////////////////////////
1912void process_display( xptr_t process_xp )
1913{
1914    process_t   * process_ptr;
1915    cxy_t         process_cxy;
1916
1917    xptr_t        parent_xp;       // extended pointer on parent process
1918    process_t   * parent_ptr;
1919    cxy_t         parent_cxy;
1920
1921    xptr_t        owner_xp;        // extended pointer on owner process
1922    process_t   * owner_ptr;
1923    cxy_t         owner_cxy;
1924
1925    pid_t         pid;
1926    pid_t         ppid;
1927    lpid_t        lpid;
1928    uint32_t      state;
1929    uint32_t      th_nr;
1930
1931    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1932    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1933    chdev_t     * txt_chdev_ptr;
1934    cxy_t         txt_chdev_cxy;
1935    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
1936
1937    xptr_t        elf_file_xp;     // extended pointer on .elf file
1938    cxy_t         elf_file_cxy;
1939    vfs_file_t  * elf_file_ptr;
1940    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1941
1942    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1943    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1944
1945    // get cluster and local pointer on process
1946    process_ptr = GET_PTR( process_xp );
1947    process_cxy = GET_CXY( process_xp );
1948
1949    // get process PID, LPID, and state
1950    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
1951    lpid  = LPID_FROM_PID( pid );
1952    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
1953
1954    // get process PPID
1955    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1956    parent_cxy = GET_CXY( parent_xp );
1957    parent_ptr = GET_PTR( parent_xp );
1958    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1959
1960    // get number of threads
1961    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
1962
1963    // get pointers on owner process descriptor
1964    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
1965    owner_cxy = GET_CXY( owner_xp );
1966    owner_ptr = GET_PTR( owner_xp );
1967
1968    // get process TXT name and .elf name
1969    if( lpid )                                   // user process
1970    {
1971
1972        // get extended pointer on file descriptor associated to TXT_RX
1973        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
1974
1975        assert( (txt_file_xp != XPTR_NULL) ,
1976        "process must be attached to one TXT terminal" ); 
1977
1978        // get TXT_RX chdev pointers
1979        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1980        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1981        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1982
1983        // get TXT_RX name and ownership
1984        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1985                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
1986   
1987        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1988                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
1989
1990        // get process .elf name
1991        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1992        elf_file_cxy  = GET_CXY( elf_file_xp );
1993        elf_file_ptr  = GET_PTR( elf_file_xp );
1994        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1995        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1996    }
1997    else                                         // kernel process_zero
1998    {
1999        // TXT name and .elf name are not registered in kernel process_zero
2000        strcpy( txt_name , "txt0_rx" );
2001        txt_owner_xp = process_xp; 
2002        strcpy( elf_name , "kernel.elf" );
2003    }
2004
2005    // display process info
2006    if( txt_owner_xp == process_xp )
2007    {
2008        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2009        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2010    }
2011    else
2012    {
2013        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2014        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2015    }
2016}  // end process_display()
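
// Note : process_display() uses nolock_printk(), so the caller is expected to hold
// the TXT0 busylock in order to keep the lines of a multi-process display atomic,
// as done in process_txt_display() below :
//
//     remote_busylock_acquire( txt0_lock_xp );    // get TXT0 lock
//     process_display( process_xp );              // display one process
//     remote_busylock_release( txt0_lock_xp );    // release TXT0 lock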
2017
2018
2019////////////////////////////////////////////////////////////////////////////////////////
2020//     Terminals related functions
2021////////////////////////////////////////////////////////////////////////////////////////
2022
2023//////////////////////////////////
2024uint32_t process_txt_alloc( void )
2025{
2026    uint32_t  index;       // TXT terminal index
2027    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2028    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2029    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2030    xptr_t    root_xp;     // extended pointer on owner field in chdev
2031
2032    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2033    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2034    {
2035        // get pointers on TXT_RX[index]
2036        chdev_xp  = chdev_dir.txt_rx[index];
2037        chdev_cxy = GET_CXY( chdev_xp );
2038        chdev_ptr = GET_PTR( chdev_xp );
2039
2040        // get extended pointer on root of attached process
2041        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2042
2043        // return free TXT index if found
2044        if( xlist_is_empty( root_xp ) ) return index; 
2045    }
2046
2047    assert( false , "no free TXT terminal found" );
2048
2049    return -1;
2050
2051} // end process_txt_alloc()
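
// Usage sketch (illustrative only) : process_txt_alloc() is presumably paired with
// process_txt_attach() when a new user process is created in its owner cluster :
//
//     uint32_t txt_id = process_txt_alloc();     // get a free TXT channel index
//     process_txt_attach( process , txt_id );    // attach process to TXT_RX[txt_id]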
2052
2053/////////////////////////////////////////////
2054void process_txt_attach( process_t * process,
2055                         uint32_t    txt_id )
2056{
2057    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2058    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2059    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2060    xptr_t      root_xp;      // extended pointer on list root in chdev
2061    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2062
2063// check process is in owner cluster
2064assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2065"process descriptor not in owner cluster" );
2066
2067// check terminal index
2068assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2069"illegal TXT terminal index" );
2070
2071    // get pointers on TXT_RX[txt_id] chdev
2072    chdev_xp  = chdev_dir.txt_rx[txt_id];
2073    chdev_cxy = GET_CXY( chdev_xp );
2074    chdev_ptr = GET_PTR( chdev_xp );
2075
2076    // get extended pointer on root & lock of attached process list
2077    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2078    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2079
2080    // get lock protecting list of processes attached to TXT
2081    remote_busylock_acquire( lock_xp );
2082
2083    // insert process in attached process list
2084    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2085
2086    // release lock protecting list of processes attached to TXT
2087    remote_busylock_release( lock_xp );
2088
2089#if DEBUG_PROCESS_TXT
2090thread_t * this = CURRENT_THREAD;
2091uint32_t cycle = (uint32_t)hal_get_cycles();
2092if( DEBUG_PROCESS_TXT < cycle )
2093printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2094__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
2095#endif
2096
2097} // end process_txt_attach()
2098
2099/////////////////////////////////////////////
2100void process_txt_detach( xptr_t  process_xp )
2101{
2102    process_t * process_ptr;  // local pointer on process in owner cluster
2103    cxy_t       process_cxy;  // process owner cluster
2104    pid_t       process_pid;  // process identifier
2105    xptr_t      file_xp;      // extended pointer on stdin file
2106    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2107    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2108    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2109    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2110
2111    // get process cluster, local pointer, and PID
2112    process_cxy = GET_CXY( process_xp );
2113    process_ptr = GET_PTR( process_xp );
2114    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2115
2116// check process descriptor in owner cluster
2117assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2118"process descriptor not in owner cluster" );
2119
2120    // release TXT ownership (does nothing if not TXT owner)
2121    process_txt_transfer_ownership( process_xp );
2122
2123    // get extended pointer on process stdin pseudo file
2124    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2125
2126    // get pointers on TXT_RX chdev
2127    chdev_xp  = chdev_from_file( file_xp );
2128    chdev_cxy = GET_CXY( chdev_xp );
2129    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2130
2131    // get extended pointer on lock protecting attached process list
2132    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2133
2134    // get lock protecting list of processes attached to TXT
2135    remote_busylock_acquire( lock_xp );
2136
2137    // unlink process from attached process list
2138    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2139
2140    // release lock protecting list of processes attached to TXT
2141    remote_busylock_release( lock_xp );
2142
2143#if DEBUG_PROCESS_TXT
2144thread_t * this = CURRENT_THREAD;
2145uint32_t cycle  = (uint32_t)hal_get_cycles();
2146uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2147if( DEBUG_PROCESS_TXT < cycle )
2148printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
2149__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
2150#endif
2151
2152} // end process_txt_detach()
2153
2154///////////////////////////////////////////////////
2155void process_txt_set_ownership( xptr_t process_xp )
2156{
2157    process_t * process_ptr;
2158    cxy_t       process_cxy;
2159    pid_t       process_pid;
2160    xptr_t      file_xp;
2161    xptr_t      txt_xp;     
2162    chdev_t   * txt_ptr;
2163    cxy_t       txt_cxy;
2164
2165    // get pointers on process in owner cluster
2166    process_cxy = GET_CXY( process_xp );
2167    process_ptr = GET_PTR( process_xp );
2168    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2169
2170    // check owner cluster
2171    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2172    "process descriptor not in owner cluster" );
2173
2174    // get extended pointer on stdin pseudo file
2175    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2176
2177    // get pointers on TXT chdev
2178    txt_xp  = chdev_from_file( file_xp );
2179    txt_cxy = GET_CXY( txt_xp );
2180    txt_ptr = GET_PTR( txt_xp );
2181
2182    // set owner field in TXT chdev
2183    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
2184
2185#if DEBUG_PROCESS_TXT
2186thread_t * this = CURRENT_THREAD;
2187uint32_t cycle  = (uint32_t)hal_get_cycles();
2188uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2189if( DEBUG_PROCESS_TXT < cycle )
2190printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
2191__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
2192#endif
2193
2194}  // end process_txt_set_ownership()
2195
2196////////////////////////////////////////////////////////
2197void process_txt_transfer_ownership( xptr_t process_xp )
2198{
2199    process_t * process_ptr;     // local pointer on process releasing ownership
2200    cxy_t       process_cxy;     // process cluster
2201    pid_t       process_pid;     // process identifier
2202    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2203    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
2204    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2205    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2206    uint32_t    txt_id;          // TXT_RX channel
2207    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2208    xptr_t      root_xp;         // extended pointer on root of attached process list
2209    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
2210    xptr_t      iter_xp;         // iterator for xlist
2211    xptr_t      current_xp;      // extended pointer on current process
2212    bool_t      found;
2213
2214#if DEBUG_PROCESS_TXT
2215thread_t * this  = CURRENT_THREAD;
2216uint32_t   cycle;
2217#endif
2218
2219    // get pointers on target process
2220    process_cxy = GET_CXY( process_xp );
2221    process_ptr = GET_PTR( process_xp );
2222    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2223
2224// check owner cluster
2225assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2226"process descriptor not in owner cluster" );
2227
2228    // get extended pointer on stdin pseudo file
2229    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2230
2231    // get pointers on TXT chdev
2232    txt_xp  = chdev_from_file( file_xp );
2233    txt_cxy = GET_CXY( txt_xp );
2234    txt_ptr = GET_PTR( txt_xp );
2235
2236    // get relevant infos from chdev descriptor
2237    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2238    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2239
2240    // transfer ownership only if target process is the TXT owner
2241    if( (owner_xp == process_xp) && (txt_id > 0) ) 
2242    {
2243        // get extended pointers on root and lock of attached processes list
2244        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2245        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
2246
2247        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2248        {
2249            // get lock
2250            remote_busylock_acquire( lock_xp );
2251
2252            // scan attached process list to find KSH process
2253            found = false;
2254            for( iter_xp = hal_remote_l64( root_xp ) ;
2255                 (iter_xp != root_xp) && (found == false) ;
2256                 iter_xp = hal_remote_l64( iter_xp ) )
2257            {
2258                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2259
2260                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2261                {
2262                    // set owner field in TXT chdev
2263                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2264
2265#if DEBUG_PROCESS_TXT
2266cycle = (uint32_t)hal_get_cycles();
2267if( DEBUG_PROCESS_TXT < cycle )
2268printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n",
2269__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2270#endif
2271                    found = true;
2272                }
2273            }
2274
2275            // release lock
2276            remote_busylock_release( lock_xp );
2277
2278// A KSH process must exist for each user TXT channel
2279assert( (found == true), "KSH process not found for TXT%d", txt_id );
2280
2281        }
2282        else                                           // target process is KSH
2283        {
2284            // get lock
2285            remote_busylock_acquire( lock_xp );
2286
2287            // scan attached process list to find another process
2288            found = false;
2289            for( iter_xp = hal_remote_l64( root_xp ) ;
2290                 (iter_xp != root_xp) && (found == false) ;
2291                 iter_xp = hal_remote_l64( iter_xp ) )
2292            {
2293                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2294
2295                if( current_xp != process_xp )            // current is not KSH
2296                {
2297                    // set owner field in TXT chdev
2298                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2299
2300#if DEBUG_PROCESS_TXT
2301cycle  = (uint32_t)hal_get_cycles();
2302cxy_t       current_cxy = GET_CXY( current_xp );
2303process_t * current_ptr = GET_PTR( current_xp );
2304uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
2305if( DEBUG_PROCESS_TXT < cycle )
2306printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n",
2307__FUNCTION__, this->process->pid, this->trdid, txt_id, new_pid, cycle );
2308#endif
2309                    found = true;
2310                }
2311            }
2312
2313            // release lock
2314            remote_busylock_release( lock_xp );
2315
2316            // no more owner for TXT if no other process found
2317            if( found == false )
2318            {
2319                // set owner field in TXT chdev
2320                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
2321
2322#if DEBUG_PROCESS_TXT
2323cycle = (uint32_t)hal_get_cycles();
2324if( DEBUG_PROCESS_TXT < cycle )
2325printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
2326__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2327#endif
2328            }
2329        }
2330    }
2331    else
2332    {
2333
2334#if DEBUG_PROCESS_TXT
2335cycle = (uint32_t)hal_get_cycles();
2336if( DEBUG_PROCESS_TXT < cycle )
2337printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2338__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
2339#endif
2340
2341    }
2342
2343}  // end process_txt_transfer_ownership()
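
// Summary of the ownership transfer policy implemented above :
// - nothing is done if the target process is not the current TXT owner,
//   or if it is attached to the kernel TXT0 channel;
// - if the target process is not a KSH (its parent is not process INIT),
//   ownership goes back to the KSH process attached to this terminal,
//   which must exist;
// - if the target process is a KSH, ownership goes to any other process
//   attached to the same terminal, or to nobody (owner_xp == XPTR_NULL)
//   when no other process is attached.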
2344
2345
2346////////////////////////////////////////////////
2347bool_t process_txt_is_owner( xptr_t process_xp )
2348{
2349    // get local pointer and cluster of process in owner cluster
2350    cxy_t       process_cxy = GET_CXY( process_xp );
2351    process_t * process_ptr = GET_PTR( process_xp );
2352
2353// check target process descriptor is in owner cluster
2354pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2355assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2356"process descriptor not in owner cluster" );
2357
2358    // get extended pointer on stdin pseudo file
2359    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2360
2361    // get pointers on TXT chdev
2362    xptr_t    txt_xp  = chdev_from_file( file_xp );
2363    cxy_t     txt_cxy = GET_CXY( txt_xp );
2364    chdev_t * txt_ptr = GET_PTR( txt_xp );
2365
2366    // get extended pointer on TXT_RX owner process
2367    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2368
2369    return (process_xp == owner_xp);
2370
2371}   // end process_txt_is_owner()
2372
2373////////////////////////////////////////////////     
2374xptr_t process_txt_get_owner( uint32_t channel )
2375{
2376    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2377    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2378    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2379
2380    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
2381
2382}  // end process_txt_get_owner()
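
// Note : the value returned by process_txt_get_owner() can be XPTR_NULL when the
// TXT terminal currently has no owner (see process_txt_transfer_ownership() above),
// so callers should be prepared to handle this case.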
2383
2384///////////////////////////////////////////
2385void process_txt_display( uint32_t txt_id )
2386{
2387    xptr_t      chdev_xp;
2388    cxy_t       chdev_cxy;
2389    chdev_t   * chdev_ptr;
2390    xptr_t      root_xp;
2391    xptr_t      lock_xp;
2392    xptr_t      current_xp;
2393    xptr_t      iter_xp;
2394    cxy_t       txt0_cxy;
2395    chdev_t   * txt0_ptr;
2396    xptr_t      txt0_xp;
2397    xptr_t      txt0_lock_xp;
2398   
2399    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2400    "illegal TXT terminal index" );
2401
2402    // get pointers on TXT0 chdev
2403    txt0_xp  = chdev_dir.txt_tx[0];
2404    txt0_cxy = GET_CXY( txt0_xp );
2405    txt0_ptr = GET_PTR( txt0_xp );
2406
2407    // get extended pointer on TXT0 lock
2408    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2409
2410    // get pointers on TXT_RX[txt_id] chdev
2411    chdev_xp  = chdev_dir.txt_rx[txt_id];
2412    chdev_cxy = GET_CXY( chdev_xp );
2413    chdev_ptr = GET_PTR( chdev_xp );
2414
2415    // get extended pointer on root & lock of attached process list
2416    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2417    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2418
2419    // get lock on attached process list
2420    remote_busylock_acquire( lock_xp );
2421
2422    // get TXT0 lock in busy waiting mode
2423    remote_busylock_acquire( txt0_lock_xp );
2424
2425    // display header
2426    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2427    txt_id , (uint32_t)hal_get_cycles() );
2428
2429    // scan attached process list
2430    XLIST_FOREACH( root_xp , iter_xp )
2431    {
2432        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2433        process_display( current_xp );
2434    }
2435
2436    // release TXT0 lock in busy waiting mode
2437    remote_busylock_release( txt0_lock_xp );
2438
2439    // release lock on attached process list
2440    remote_busylock_release( lock_xp );
2441
2442}  // end process_txt_display()