source: trunk/kernel/kern/process.c

Last change on this file was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 107.4 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[683]4 * Authors  Ghassan Almaless       (2008,2009,2010,2011,2012)
[1]5 *          Mohamed Lamine Karaoui (2015)
[683]6 *          Alain Greiner          (2016,2017,2018,2019,2020)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[669]43#include <ksocket.h>
[1]44#include <list.h>
[407]45#include <string.h>
[1]46#include <scheduler.h>
[564]47#include <busylock.h>
48#include <queuelock.h>
49#include <remote_queuelock.h>
50#include <rwlock.h>
51#include <remote_rwlock.h>
[1]52#include <dqdt.h>
53#include <cluster.h>
54#include <ppm.h>
55#include <boot_info.h>
56#include <process.h>
57#include <elf.h>
[23]58#include <syscalls.h>
[435]59#include <shared_syscalls.h>
[1]60
61//////////////////////////////////////////////////////////////////////////////////////////
62// Extern global variables
63//////////////////////////////////////////////////////////////////////////////////////////
64
[428]65extern process_t           process_zero;     // allocated in kernel_init.c
66extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]67
68//////////////////////////////////////////////////////////////////////////////////////////
69// Process initialisation related functions
70//////////////////////////////////////////////////////////////////////////////////////////
71
[625]72////////////////////////////////////////////////////
73error_t process_reference_init( process_t * process,
74                                pid_t       pid,
75                                xptr_t      parent_xp )
[1]76{
[625]77    error_t     error;
[610]78    xptr_t      process_xp;
[428]79    cxy_t       parent_cxy;
80    process_t * parent_ptr;
[407]81    xptr_t      stdin_xp;
82    xptr_t      stdout_xp;
83    xptr_t      stderr_xp;
84    uint32_t    stdin_id;
85    uint32_t    stdout_id;
86    uint32_t    stderr_id;
[428]87    uint32_t    txt_id;
88    char        rx_path[40];
89    char        tx_path[40];
90    pid_t       parent_pid;
[625]91    vmm_t     * vmm;
[1]92
[683]93#if DEBUG_PROCESS_REFERENCE_INIT || DEBUG_PROCESS_ERROR
94thread_t * this  = CURRENT_THREAD;
95uint32_t   cycle = (uint32_t)hal_get_cycles();
96#endif
97
98    // build extended pointer on reference process
[610]99    process_xp = XPTR( local_cxy , process );
100
[625]101    // get pointer on process vmm
102    vmm = &process->vmm;
103
[428]104    // get parent process cluster and local pointer
105    parent_cxy = GET_CXY( parent_xp );
[435]106    parent_ptr = GET_PTR( parent_xp );
[204]107
[457]108    // get parent_pid
[564]109    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]110
[438]111#if DEBUG_PROCESS_REFERENCE_INIT
[610]112if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]113printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
114__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]115#endif
[428]116
[610]117    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]118        process->pid        = pid;
119    process->ref_xp     = XPTR( local_cxy , process );
[443]120    process->owner_xp   = XPTR( local_cxy , process );
[433]121    process->parent_xp  = parent_xp;
122    process->term_state = 0;
[428]123
[610]124    // initialize VFS root inode and CWD inode
125    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
126    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
127
[625]128    // initialize VSL as empty
129    vmm->vsegs_nr = 0;
130        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]131
[625]132    // create an empty GPT as required by the architecture
133    error = hal_gpt_create( &vmm->gpt );
134    if( error ) 
135    {
[683]136
137#if DEBUG_PROCESS_ERROR
138printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
139__FUNCTION__, this->process->pid, this->trdid, cycle );
140#endif
[625]141        return -1;
142    }
143
144#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
145if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
146printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
147__FUNCTION__, parent_pid, this->trdid, pid );
148#endif
149
[635]150    // initialize VSL lock
[625]151        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
152
[635]153    // register kernel vsegs in user process VMM as required by the architecture
[625]154    error = hal_vmm_kernel_update( process );
155    if( error ) 
156    {
[683]157
158#if DEBUG_PROCESS_ERROR
159printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
160__FUNCTION__, this->process->pid, this->trdid, cycle );
161#endif
[625]162        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
163        return -1;
164    }
165
166#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
167if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[635]168printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
[625]169__FUNCTION__, parent_pid, this->trdid, pid );
170#endif
171
172    // create "args" and "envs" vsegs
173    // create "stacks" and "mmap" vsegs allocators
174    // initialize locks protecting GPT and VSL
175    error = vmm_user_init( process );
176    if( error ) 
177    {
178        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
179        return -1;
180    }
[415]181 
[438]182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]183cycle = (uint32_t)hal_get_cycles();
[610]184if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]185printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
186__FUNCTION__, parent_pid, this->trdid, pid );
[433]187#endif
[1]188
[409]189    // initialize fd_array as empty
[408]190    process_fd_init( process );
[1]191
[428]192    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]193    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]194    {
[581]195        // select a TXT channel
196        if( pid == 1 )  txt_id = 0;                     // INIT
197        else            txt_id = process_txt_alloc();   // KSH
[428]198
[457]199        // attach process to TXT
[669]200        process_txt_attach( process_xp , txt_id ); 
[428]201
[457]202#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
203cycle = (uint32_t)hal_get_cycles();
[610]204if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
205printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
206__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]207#endif
[428]208        // build path to TXT_RX[i] and TXT_TX[i] chdevs
[669]209        snprintk( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
210        snprintk( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
[428]211
212        // create stdin pseudo file         
[610]213        error = vfs_open(  process->vfs_root_xp,
[428]214                           rx_path,
[610]215                           process_xp,
[408]216                           O_RDONLY, 
217                           0,                // FIXME chmod
218                           &stdin_xp, 
219                           &stdin_id );
[625]220        if( error )
221        {
[683]222
223#if DEBUG_PROCESS_ERROR
224printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdin pseudo file / cycle %d\n",
225__FUNCTION__, this->process->pid, this->trdid, cycle );
226#endif
[625]227            return -1;
228        }
[1]229
[669]230assert( __FUNCTION__, (stdin_id == 0) , "stdin index must be 0" );
[428]231
[440]232#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
233cycle = (uint32_t)hal_get_cycles();
[610]234if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
235printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
236__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]237#endif
238
[428]239        // create stdout pseudo file         
[610]240        error = vfs_open(  process->vfs_root_xp,
[428]241                           tx_path,
[610]242                           process_xp,
[408]243                           O_WRONLY, 
244                           0,                // FIXME chmod
245                           &stdout_xp, 
246                           &stdout_id );
[625]247        if( error )
248        {
[683]249
250#if DEBUG_PROCESS_ERROR
251printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdout pseudo file / cycle %d\n",
252__FUNCTION__, this->process->pid, this->trdid, cycle );
253#endif
[625]254            return -1;
255        }
[1]256
[669]257assert( __FUNCTION__, (stdout_id == 1) , "stdout index must be 1" );
[428]258
[440]259#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
260cycle = (uint32_t)hal_get_cycles();
[610]261if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
262printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
263__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]264#endif
265
[428]266        // create stderr pseudo file         
[610]267        error = vfs_open(  process->vfs_root_xp,
[428]268                           tx_path,
[610]269                           process_xp,
[408]270                           O_WRONLY, 
271                           0,                // FIXME chmod
272                           &stderr_xp, 
273                           &stderr_id );
[625]274        if( error )
275        {
[683]276
277#if DEBUG_PROCESS_ERROR
278printk("\n[ERROR] in %s : thread[%x,%x] cannot open stderr pseudo file / cycle %d\n",
279__FUNCTION__, this->process->pid, this->trdid, cycle );
280#endif
[625]281            return -1;
282        }
[428]283
[669]284assert( __FUNCTION__, (stderr_id == 2) , "stderr index must be 2" );
[428]285
[440]286#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
287cycle = (uint32_t)hal_get_cycles();
[610]288if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
289printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
290__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]291#endif
292
[408]293    }
[428]294    else                                            // normal user process
[408]295    {
[669]296        // get parent process TXT index
297        txt_id = process_txt_get_index( parent_xp );
[440]298
[669]299        // attach child process to same TXT terminal as parent
300        process_txt_attach( process_xp , txt_id ); 
[407]301
[669]302        // recreate all open files from parent process fd_array to child process fd_array
[683]303        error = process_fd_replicate( process_xp , parent_xp );
304
305        if( error )
306        {
307
308#if DEBUG_PROCESS_ERROR
309printk("\n[ERROR] in %s : thread[%x,%x] cannot replicate fd_array / cycle %d\n",
310__FUNCTION__, this->process->pid, this->trdid, cycle );
311#endif
312            return -1;
313        }
314
[408]315    }
[407]316
[610]317    // initialize lock protecting CWD changes
[669]318    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]319
[438]320#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]321cycle = (uint32_t)hal_get_cycles();
[610]322if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
323printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
324__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]325#endif
[407]326
[408]327    // reset children list root
328    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
329    process->children_nr     = 0;
[625]330    remote_queuelock_init( XPTR( local_cxy,
331                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]332
[611]333    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]334    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
335    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
336    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
337    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]338    remote_queuelock_init( XPTR( local_cxy , 
339                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]340
[611]341    // reset open directories root and lock
342    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]343    remote_queuelock_init( XPTR( local_cxy , 
344                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]345
[408]346    // register new process in the local cluster manager pref_tbl[]
347    lpid_t lpid = LPID_FROM_PID( pid );
348    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]349
[408]350    // register new process descriptor in local cluster manager local_list
351    cluster_process_local_link( process );
[407]352
[408]353    // register new process descriptor in local cluster manager copies_list
354    cluster_process_copies_link( process );
[172]355
[564]356    // initialize th_tbl[] array and associated threads
[1]357    uint32_t i;
[564]358
359    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]360        {
361        process->th_tbl[i] = NULL;
362    }
363    process->th_nr  = 0;
[564]364    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]365
[124]366        hal_fence();
[1]367
[438]368#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]369cycle = (uint32_t)hal_get_cycles();
[610]370if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
371printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
372__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]373#endif
[101]374
[635]375#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
376hal_vmm_display( parent_xp , false );
377hal_vmm_display( XPTR( local_cxy , process ) , false );
378#endif
379
[625]380    return 0;
381
[428]382}  // process_reference_init()
[204]383
[1]384/////////////////////////////////////////////////////
385error_t process_copy_init( process_t * local_process,
386                           xptr_t      reference_process_xp )
387{
[625]388    error_t   error;
389    vmm_t   * vmm;
[415]390
[683]391#if DEBUG_PROCESS_COPY_INIT || DEBUG_PROCESS_ERROR
392thread_t * this = CURRENT_THREAD; 
393uint32_t cycle = (uint32_t)hal_get_cycles();
394#endif
395
[23]396    // get reference process cluster and local pointer
397    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]398    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]399
[625]400    // get pointer on process vmm
401    vmm = &local_process->vmm;
402
[428]403    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]404    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
405    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]406    local_process->ref_xp     = reference_process_xp;
[443]407    local_process->owner_xp   = reference_process_xp;
[433]408    local_process->term_state = 0;
[407]409
[564]410#if DEBUG_PROCESS_COPY_INIT
[610]411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]414#endif
[407]415
[564]416// check user process
[669]417assert( __FUNCTION__, (local_process->pid != 0), "LPID cannot be 0" );
[564]418
[625]419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]422
[625]423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
[683]425
[625]426    if( error ) 
427    {
[683]428
429#if DEBUG_PROCESS_ERROR
430printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
431__FUNCTION__, this->process->pid, this->trdid, cycle );
432#endif
[625]433        return -1;
434    }
435
436    // initialize GPT and VSL locks
437        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
438
439    // register kernel vsegs in VMM as required by the architecture
440    error = hal_vmm_kernel_update( local_process );
[683]441
[625]442    if( error ) 
443    {
[683]444
445#if DEBUG_PROCESS_ERROR
446printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
447__FUNCTION__, this->process->pid, this->trdid, cycle );
448#endif
[625]449        return -1;
450    }
451
452    // create "args" and "envs" vsegs
453    // create "stacks" and "mmap" vsegs allocators
454    // initialize locks protecting GPT and VSL
455    error = vmm_user_init( local_process );
[683]456
[625]457    if( error ) 
458    {
[683]459
460#if DEBUG_PROCESS_ERROR
461printk("\n[ERROR] in %s : thread[%x,%x] cannot register user vsegs in VMM / cycle %d\n",
462__FUNCTION__, this->process->pid, this->trdid, cycle );
463#endif
[625]464        return -1;
465    }
466 
467#if (DEBUG_PROCESS_COPY_INIT & 1)
468cycle = (uint32_t)hal_get_cycles();
469if( DEBUG_PROCESS_COPY_INIT < cycle )
470printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
471__FUNCTION__, parent_pid, this->trdid, pid, cycle );
472#endif
473
474    // set process file descriptors array
[23]475        process_fd_init( local_process );
[1]476
[625]477    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]478    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
479    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]480    local_process->cwd_xp      = XPTR_NULL;
[1]481
482    // reset children list root (not used in a process descriptor copy)
483    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]484    local_process->children_nr   = 0;
[564]485    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
486                           LOCK_PROCESS_CHILDREN );
[1]487
[428]488    // reset children_list (not used in a process descriptor copy)
489    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]490
491    // reset semaphores list root (not used in a process descriptor copy)
492    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]493    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
494    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
495    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]496
[564]497    // initialize th_tbl[] array and associated fields
[1]498    uint32_t i;
[564]499    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]500        {
501        local_process->th_tbl[i] = NULL;
502    }
503    local_process->th_nr  = 0;
[564]504    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]505
506    // register new process descriptor in local cluster manager local_list
507    cluster_process_local_link( local_process );
508
509    // register new process descriptor in owner cluster manager copies_list
510    cluster_process_copies_link( local_process );
511
[124]512        hal_fence();
[1]513
[438]514#if DEBUG_PROCESS_COPY_INIT
[433]515cycle = (uint32_t)hal_get_cycles();
[610]516if( DEBUG_PROCESS_COPY_INIT < cycle )
517printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
518__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]519#endif
[279]520
[1]521    return 0;
522
[204]523} // end process_copy_init()
524
[1]525///////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Release all resources attached to a process descriptor in the local cluster:
// VMM, local_list / copies_list links in the cluster managers, and - only when the
// local cluster is the owner cluster - the TXT attachment, the entry in the parent
// process children list, and the PID itself. Finally releases the descriptor memory.
// Must be called only when the local descriptor contains no more threads (asserted).
// NOTE(review): several cleanups (vfs_bin_xp refcount, open files, dirty files)
// are still marked FIXME below.
// @ process : local pointer on the process descriptor to destroy.
///////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;          // extended pointer on parent process
    process_t * parent_ptr;         // local pointer on parent process
    cxy_t       parent_cxy;         // parent process cluster
    xptr_t      children_lock_xp;   // extended pointer on parent children lock
    xptr_t      children_nr_xp;     // extended pointer on parent children counter

    pid_t       pid = process->pid;

// check no more threads
assert( __FUNCTION__, (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        // (lock taken in the parent cluster; counter decremented atomically)
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
        kmem_free( process , bits_log2(sizeof(process_t)) );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
638
[583]639///////////////////////////////////////////////////////////////////
[527]640const char * process_action_str( process_sigactions_t action_type )
[409]641{
[583]642    switch ( action_type )
643    {
644        case BLOCK_ALL_THREADS:   return "BLOCK";
645        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
646        case DELETE_ALL_THREADS:  return "DELETE";
647        default:                  return "undefined";
648    }
[409]649}
650
[435]651////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Apply a sigaction (BLOCK / UNBLOCK / DELETE all threads) to every copy of the
// target process, in all clusters. The calling (client) thread sends one parallel,
// non-blocking RPC per remote copy, then handles the local copy (if any) directly.
// The RPC descriptor is allocated in the client stack and shared by all servers,
// which is safe because the RPC has no output arguments; the shared "responses"
// counter is decremented by the servers, and the last one unblocks the client.
// @ pid  : target process identifier.
// @ type : action type (BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS).
///////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( __FUNCTION__, ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    // (must be done BEFORE sending the RPCs, so an early server
    //  completion cannot unblock a not-yet-blocked client)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            // defer local handling until after the copies lock is released
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
804
[433]805/////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
// This function blocks all threads of a given local user process : it sets the
// THREAD_BLOCKED_GLOBAL bit in each thread descriptor registered in th_tbl[],
// and busy-waits until each thread running on ANOTHER core has acknowledged the
// blocking through its scheduler, so that no target thread can still be running
// when this function returns. Threads running on the calling core need no
// acknowledge, because the caller itself occupies that core.
// NOTE(review): the function accesses process->th_tbl[] directly, so <process>
// must be a process copy located in the local cluster.
///////////////////////////////////////////////////////////////////////////////////
// @ process   : local pointer on the target user process descriptor.
///////////////////////////////////////////////////////////////////////////////////
void process_block_threads( process_t * process )
{
    thread_t          * target;         // pointer on target thread
    thread_t          * this;           // pointer on calling thread
    uint32_t            ltid;           // index in process th_tbl[]
    uint32_t            count;          // requests counter
    volatile uint32_t   ack_count;      // acknowledges counter (decremented by remote schedulers)

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowledge the blocking
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from scheduler.
           
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exist
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
 
            if( this->core->lid != target->core->lid )
            {
                // increment responses counter (atomic, because remote
                // schedulers decrement it concurrently)
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait other threads acknowledges  TODO this could be improved...
    // NOTE(review): this is a busy-wait polling loop; ack_count is volatile
    // so each iteration re-reads the value written by remote schedulers.
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()
[409]886
[440]887/////////////////////////////////////////////////
888void process_delete_threads( process_t * process,
889                             xptr_t      client_xp )
[409]890{
[440]891    thread_t          * target;        // local pointer on target thread
892    xptr_t              target_xp;     // extended pointer on target thread
893    cxy_t               owner_cxy;     // owner process cluster
[409]894    uint32_t            ltid;          // index in process th_tbl
[440]895    uint32_t            count;         // threads counter
[409]896
[433]897    // get calling thread pointer
[409]898
[440]899    // get target process owner cluster
900    owner_cxy = CXY_FROM_PID( process->pid );
901
[438]902#if DEBUG_PROCESS_SIGACTION
[633]903thread_t * this  = CURRENT_THREAD;
904uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]905if( DEBUG_PROCESS_SIGACTION < cycle )
[625]906printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
907__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]908#endif
909
[564]910// check target process is an user process
[669]911assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0),
[619]912"process %x is not an user process\n", process->pid );
[564]913
[409]914    // get lock protecting process th_tbl[]
[583]915    rwlock_wr_acquire( &process->th_lock );
[409]916
[440]917    // loop on target process local threads                       
[416]918    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]919    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]920    {
[409]921        target = process->th_tbl[ltid];
[1]922
[440]923        if( target != NULL )    // valid thread 
[1]924        {
[416]925            count++;
[440]926            target_xp = XPTR( local_cxy , target );
[1]927
[564]928            // main thread and client thread should not be deleted
[440]929            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
930                (client_xp) != target_xp )                           // not client thread
931            {
932                // mark target thread for delete and block it
[669]933                thread_delete_request( target_xp , true );                   // forced
[440]934            }
[409]935        }
936    }
[1]937
[428]938    // release lock protecting process th_tbl[]
[583]939    rwlock_wr_release( &process->th_lock );
[407]940
[438]941#if DEBUG_PROCESS_SIGACTION
[433]942cycle = (uint32_t)hal_get_cycles();
[438]943if( DEBUG_PROCESS_SIGACTION < cycle )
[593]944printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
945__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]946#endif
[407]947
[440]948}  // end process_delete_threads()
[409]949
[440]950///////////////////////////////////////////////////
951void process_unblock_threads( process_t * process )
[409]952{
[440]953    thread_t          * target;        // pointer on target thead
[409]954    uint32_t            ltid;          // index in process th_tbl
[440]955    uint32_t            count;         // requests counter
[409]956
[438]957#if DEBUG_PROCESS_SIGACTION
[633]958thread_t * this  = CURRENT_THREAD;
959pid_t      pid   = process->pid;
960uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]961if( DEBUG_PROCESS_SIGACTION < cycle )
[593]962printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]963__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]964#endif
965
[564]966// check target process is an user process
[669]967assert( __FUNCTION__, ( LPID_FROM_PID( process->pid ) != 0 ),
[619]968"process %x is not an user process\n", process->pid );
[564]969
[416]970    // get lock protecting process th_tbl[]
[564]971    rwlock_rd_acquire( &process->th_lock );
[416]972
[440]973    // loop on process threads to unblock all threads
[416]974    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]975    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]976    {
[416]977        target = process->th_tbl[ltid];
[409]978
[440]979        if( target != NULL )             // thread found
[409]980        {
981            count++;
[440]982
983            // reset the global blocked bit in target thread descriptor.
984            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]985        }
986    }
987
[428]988    // release lock protecting process th_tbl[]
[564]989    rwlock_rd_release( &process->th_lock );
[407]990
[438]991#if DEBUG_PROCESS_SIGACTION
[433]992cycle = (uint32_t)hal_get_cycles();
[438]993if( DEBUG_PROCESS_SIGACTION < cycle )
[593]994printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]995__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]996#endif
[1]997
[440]998}  // end process_unblock_threads()
[407]999
[1]1000///////////////////////////////////////////////
1001process_t * process_get_local_copy( pid_t pid )
1002{
1003    error_t        error;
[683]1004    process_t    * process;       // local pointer on process
[23]1005    xptr_t         process_xp;    // extended pointer on process
[1]1006
[683]1007#if DEBUG_PROCESS_GET_LOCAL_COPY || DEBUG_PROCESS_ERROR
1008thread_t * this  = CURRENT_THREAD;
1009uint32_t   cycle = (uint32_t)hal_get_cycles();
1010#endif
1011
[1]1012    cluster_t * cluster = LOCAL_CLUSTER;
1013
[564]1014#if DEBUG_PROCESS_GET_LOCAL_COPY
1015if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1016printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]1017__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]1018#endif
1019
[1]1020    // get lock protecting local list of processes
[564]1021    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1022
1023    // scan the local list of process descriptors to find the process
[23]1024    xptr_t  iter;
1025    bool_t  found = false;
1026    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]1027    {
[23]1028        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[683]1029        process     = GET_PTR( process_xp );
1030        if( process->pid == pid )
[1]1031        {
1032            found = true;
1033            break;
1034        }
1035    }
1036
1037    // release lock protecting local list of processes
[564]1038    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1039
[172]1040    // allocate memory for a new local process descriptor
[440]1041    // and initialise it from reference cluster if not found
[1]1042    if( !found )
1043    {
1044        // get extended pointer on reference process descriptor
[23]1045        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1046
[669]1047        assert( __FUNCTION__, (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1048
[1]1049        // allocate memory for local process descriptor
[683]1050        process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
[443]1051
[683]1052        if( process == NULL )  return NULL;
[1]1053
1054        // initialize local process descriptor copy
[683]1055        error = process_copy_init( process , ref_xp );
[443]1056
[683]1057        if( error )
1058        {
1059
1060#if DEBUG_PROCESS_ERROR
1061printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize local process copy / cycle %d\n",
1062__FUNCTION__, this->process->pid, this->trdid, cycle );
1063#endif
1064            return NULL;
1065        }
[1]1066    }
1067
[440]1068#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1069cycle = (uint32_t)hal_get_cycles();
[440]1070if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1071printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[683]1072__FUNCTION__, this->process->pid, this->trdid, local_cxy, process, cycle );
[440]1073#endif
1074
[683]1075    return process;
[1]1076
[409]1077}  // end process_get_local_copy()
1078
[436]1079////////////////////////////////////////////
1080pid_t process_get_ppid( xptr_t  process_xp )
1081{
1082    cxy_t       process_cxy;
1083    process_t * process_ptr;
1084    xptr_t      parent_xp;
1085    cxy_t       parent_cxy;
1086    process_t * parent_ptr;
1087
1088    // get process cluster and local pointer
1089    process_cxy = GET_CXY( process_xp );
1090    process_ptr = GET_PTR( process_xp );
1091
1092    // get pointers on parent process
[564]1093    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1094    parent_cxy = GET_CXY( parent_xp );
1095    parent_ptr = GET_PTR( parent_xp );
1096
[564]1097    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1098}
1099
[1]1100//////////////////////////////////////////////////////////////////////////////////////////
1101// File descriptor array related functions
1102//////////////////////////////////////////////////////////////////////////////////////////
1103
1104///////////////////////////////////////////
[662]1105char * process_fd_type_str( uint32_t type )
1106{
1107    switch( type )
1108    {
[669]1109        case FILE_TYPE_REG : return "FILE";
1110        case FILE_TYPE_DIR  : return "DIR";
1111        case FILE_TYPE_FIFO : return "FIFO";
1112        case FILE_TYPE_PIPE : return "PIPE";
1113        case FILE_TYPE_SOCK : return "SOCK";
1114        case FILE_TYPE_DEV  : return "DEV";
1115        case FILE_TYPE_BLK  : return "BLK";
1116        case FILE_TYPE_SYML : return "SYML";
[662]1117       
1118        default              : return "undefined";
1119    }
1120}
1121   
1122///////////////////////////////////////////
[1]1123void process_fd_init( process_t * process )
1124{
1125    uint32_t fd;
1126
[610]1127    // initialize lock
[564]1128    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1129
[610]1130    // initialize number of open files
[662]1131    process->fd_array.max = 0;
[23]1132
[1]1133    // initialize array
[23]1134    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1135    {
1136        process->fd_array.array[fd] = XPTR_NULL;
1137    }
1138}
[635]1139
[610]1140////////////////////////////////////////////////////
1141error_t process_fd_register( xptr_t      process_xp,
[407]1142                             xptr_t      file_xp,
1143                             uint32_t  * fdid )
[1]1144{
1145    bool_t    found;
[23]1146    uint32_t  id;
[662]1147    uint32_t  max;             // current value of max non-free slot index
1148    xptr_t    entry_xp;        // current value of one fd_array entry
1149    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1150    xptr_t    max_xp;          // extended pointer on max field in fd_array
[1]1151
[683]1152#if DEBUG_PROCESS_FD_REGISTER
1153thread_t * this  = CURRENT_THREAD;
1154uint32_t   cycle = (uint32_t)hal_get_cycles();
1155#endif
1156
[657]1157    // get target process cluster and local pointer
[610]1158    process_t * process_ptr = GET_PTR( process_xp );
1159    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1160
[662]1161// check target process is owner process
[669]1162assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
[662]1163"process must be owner process\n" );
[610]1164
1165#if DEBUG_PROCESS_FD_REGISTER
[683]1166pid_t  tgt_pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
[610]1167if( DEBUG_PROCESS_FD_REGISTER < cycle )
1168printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
[683]1169__FUNCTION__, this->process->pid, this->trdid, tgt_pid, cycle );
[610]1170#endif
1171
[662]1172    // build extended pointers on lock & max
1173    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1174    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
[610]1175
[669]1176    // take lock protecting fd_array
[610]1177        remote_queuelock_acquire( lock_xp );
[23]1178
[1]1179    found   = false;
1180
[662]1181    // get current value of max_fdid
1182    max = hal_remote_l32( max_xp );
1183
[23]1184    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1185    {
[662]1186        // get fd_array entry
1187        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1188       
[669]1189        // take the first empty slot
[662]1190        if ( entry_xp == XPTR_NULL )
[1]1191        {
[662]1192            // update  fd_array
[610]1193            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
[564]1194
[662]1195            // update max when required
1196            if( id > max ) hal_remote_s32( max_xp , id );
1197
1198            // exit loop
[564]1199                        *fdid = id;
[1]1200            found = true;
1201            break;
1202        }
1203    }
1204
[610]1205    // release lock protecting fd_array
1206        remote_queuelock_release( lock_xp );
[1]1207
[610]1208#if DEBUG_PROCESS_FD_REGISTER
1209cycle = (uint32_t)hal_get_cycles();
1210if( DEBUG_PROCESS_FD_REGISTER < cycle )
1211printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
[683]1212__FUNCTION__, this->process->pid, this->trdid, tgt_pid, id, cycle );
[610]1213#endif
1214
[428]1215    if ( !found ) return -1;
[1]1216    else          return 0;
1217
[610]1218}  // end process_fd_register()
1219
[657]1220/////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
// This function removes the <fdid> entry from the fd_array of the owner
// process identified by <process_xp>, and from the fd_array of ALL process
// copies (scanned through the list of copies in the owner cluster). It also
// decrements the fd_array "max" field when the removed entry was the highest
// registered one. It does NOT release the file descriptor itself.
///////////////////////////////////////////////////////////////////////////////////
// @ process_xp  : extended pointer on the owner process descriptor.
// @ fdid        : index of the entry to remove in the fd_array.
///////////////////////////////////////////////////////////////////////////////////
void process_fd_remove( xptr_t    process_xp,
                        uint32_t  fdid )
{
    pid_t       pid;           // target process PID
    lpid_t      lpid;          // target process LPID
    xptr_t      file_xp;       // extended pointer on file descriptor
    xptr_t      iter_xp;       // iterator for list of process copies
    xptr_t      copy_xp;       // extended pointer on process copy
    process_t * copy_ptr;      // local pointer on process copy 
    cxy_t       copy_cxy;      // process copy cluster identifier

    // get target process cluster and local pointer
    process_t * process_ptr = GET_PTR( process_xp );
    cxy_t       process_cxy = GET_CXY( process_xp );

// check target process is owner process
assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
"process must be owner process\n" );

    // get target process pid and lpid
    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
    lpid = LPID_FROM_PID( pid );

#if DEBUG_PROCESS_FD_REMOVE
uint32_t    cycle = (uint32_t)hal_get_cycles();
thread_t  * this  = CURRENT_THREAD;
if( DEBUG_PROCESS_FD_REMOVE < cycle )
printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
#endif

    // get extended pointer on file descriptor
    // NOTE(review): file_xp is read here but never used below — looks like
    // dead code left from a previous version; confirm before removing.
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));

    // build extended pointers on list_of_copies root and lock (in owner cluster)
    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
 
    // build extended pointer on fd_array lock and max
    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );

    // take lock protecting fd_array
        remote_queuelock_acquire( fd_lock_xp );

    // take the lock protecting the list of copies
    remote_queuelock_acquire( copies_lock_xp );

    // get max value
    uint32_t max = hal_remote_l32( fd_max_xp );

    // loop on list of process copies
    XLIST_FOREACH( copies_root_xp , iter_xp )
    {
        // get pointers on process copy
        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        copy_ptr = GET_PTR( copy_xp );
        copy_cxy = GET_CXY( copy_xp );

        // release the fd_array entry in process copy
        // (the owner process itself is in this list, so its entry is reset too)
        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
    }

    // update max when required
    // NOTE(review): only handles removal of the topmost entry; when the slot
    // just below max is also free, max is not further decremented.
    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );

    // release the lock protecting fd_array
        remote_queuelock_release( fd_lock_xp );

    // release the lock protecting the list of copies
    remote_queuelock_release( copies_lock_xp );

#if DEBUG_PROCESS_FD_REMOVE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_FD_REMOVE < cycle )
printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
#endif

}  // end process_fd_remove()
1301
[662]1302//////////////////////////////////////////////
1303void process_fd_clean_all( xptr_t process_xp )
[1]1304{
[669]1305    uint32_t  fdid;
[662]1306    xptr_t    file_xp;         // one fd_array entry
1307    xptr_t    lock_xp;         // extendad pointer on lock protecting fd_array
1308    uint32_t  max;             // number of registered files
1309
1310    // get process cluster, local pointer and PID
1311    process_t * process_ptr = GET_PTR( process_xp );
1312    cxy_t       process_cxy = GET_CXY( process_xp );
1313
1314// check target process is owner process
[669]1315assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
[662]1316"process must be owner process\n" );
1317
1318#if DEBUG_PROCESS_FD_CLEAN_ALL
1319thread_t * this  = CURRENT_THREAD;
1320uint32_t   cycle = (uint32_t)hal_get_cycles();
1321if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
[669]1322printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1323__FUNCTION__, this->process->pid, this->trdid, cycle );
[662]1324
1325process_fd_display( process_xp );
1326#endif
1327
1328    // build extended pointer on lock protecting the fd_array
1329    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1330
1331    // get max index for fd_array
1332    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1333
1334    // take lock protecting fd_array
1335        remote_queuelock_acquire( lock_xp );
1336
[669]1337    for( fdid = 0 ; fdid <= max ; fdid++ )
[662]1338    {
1339        // get fd_array entry
[669]1340        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ) );
[662]1341       
1342        if ( file_xp != XPTR_NULL )
1343        {
[669]1344            vfs_file_t * file_ptr = GET_PTR( file_xp );
1345            cxy_t        file_cxy = GET_CXY( file_xp );
[662]1346
[669]1347            // get file type
1348            uint32_t file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ));
1349 
1350            if( file_type == FILE_TYPE_REG )
1351            {
1352                vfs_close( file_xp , fdid );
1353            }
1354            if( file_type == FILE_TYPE_SOCK )
1355            {
1356                socket_close( file_xp , fdid );
1357            }
[662]1358        }
1359    }
1360
1361    // release lock protecting fd_array
1362        remote_queuelock_release( lock_xp );
1363
1364#if DEBUG_PROCESS_FD_CLEAN_ALL
1365cycle = (uint32_t)hal_get_cycles();
1366if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1367printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1368__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1369#endif
1370
1371}  // end process_fd_clean_all()
1372
1373//////////////////////////////////////////////////////////////
1374xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1375                                       uint32_t    fdid )
1376{
1377    cxy_t       process_cxy = GET_CXY( process_xp );
1378    process_t * process_ptr = GET_PTR( process_xp );
1379
[669]1380assert( __FUNCTION__, (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
[662]1381"process_xp argument must be the owner process" );
1382
1383    // access owner process fd_array
1384    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1385
1386}  // end process_fd_get_xptr_from_owner()
1387
1388///////////////////////////////////////////////////////////
1389xptr_t process_fd_get_xptr_from_local( process_t * process,
1390                                       uint32_t    fdid )
1391{
[23]1392    xptr_t  file_xp;
[564]1393    xptr_t  lock_xp;
[1]1394
[23]1395    // access local copy of process descriptor
[407]1396    file_xp = process->fd_array.array[fdid];
[1]1397
[23]1398    if( file_xp == XPTR_NULL )
1399    {
[662]1400        // get owner process cluster and local pointer
1401        xptr_t      owner_xp  = process->owner_xp;
1402        cxy_t       owner_cxy = GET_CXY( owner_xp );
1403        process_t * owner_ptr = GET_PTR( owner_xp );
[1]1404
[662]1405        // build extended pointer on lock protecting fd_array
1406        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );
[564]1407
[662]1408        // take lock protecting fd_array
[564]1409            remote_queuelock_acquire( lock_xp );
1410
[669]1411        // access owner process descriptor
[662]1412        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );
[1]1413
[662]1414        if( file_xp != XPTR_NULL ) 
1415        {
1416           // update local fd_array
1417            process->fd_array.array[fdid] = file_xp;
1418        }
1419
1420        // release lock protecting fd_array
[564]1421            remote_queuelock_release( lock_xp );
[23]1422    }
[1]1423
[23]1424    return file_xp;
[1]1425
[662]1426}  // end process_fd_get_xptr_from_local()
[407]1427
[683]1428////////////////////////////////////////////
1429error_t process_fd_replicate( xptr_t dst_xp,
1430                              xptr_t src_xp )
[1]1431{
[669]1432    uint32_t fdid;      // current file descriptor index
1433    xptr_t   old_xp;    // extended pointer on a file descriptor (stored in SRC fd_array)
1434    xptr_t   new_xp;    // extended pointer on a file descriptor (stored in DST fd_array)
1435    error_t  error;
[1]1436
[669]1437    // get cluster and local pointer for SRC process
1438    cxy_t       src_cxy = GET_CXY( src_xp );
1439    process_t * src_ptr = GET_PTR( src_xp );
[1]1440
[669]1441assert( __FUNCTION__, (src_xp == hal_remote_l64( XPTR( src_cxy , &src_ptr->owner_xp ))),
1442"src_xp process not in owner cluster" );
[1]1443
[669]1444    // get cluster and local pointer for DST fd_array
1445    cxy_t       dst_cxy = GET_CXY( dst_xp );
1446    process_t * dst_ptr = GET_PTR( dst_xp );
1447
1448assert( __FUNCTION__, (dst_xp == hal_remote_l64( XPTR( dst_cxy , &dst_ptr->owner_xp ))),
1449"dst_xp process not in owner cluster" );
1450
1451    // build extende pointers on SRC fd_array lock and max fields
1452    xptr_t  src_lock_xp = XPTR( src_cxy , &src_ptr->fd_array.lock );
1453    xptr_t  src_max_xp  = XPTR( src_cxy , &src_ptr->fd_array.max );
1454
[1]1455    // get the remote lock protecting the src fd_array
[669]1456        remote_queuelock_acquire( src_lock_xp );
1457 
1458    // loop on fd_array entries
1459    for( fdid = 0 ; fdid <= hal_remote_l32( src_max_xp ) ; fdid++ )
[1]1460        {
[669]1461                old_xp = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->fd_array.array[fdid] ) );
[1]1462
[669]1463                if( old_xp != XPTR_NULL )
[1]1464                {
[669]1465            // get the existing file descriptor cluster and local pointer
1466            vfs_file_t * old_ptr = GET_PTR( old_xp );
1467            cxy_t        old_cxy = GET_CXY( old_xp );
[1]1468
[669]1469            // get existing file attributes and local pointer on inode
1470            uint32_t      attr      = hal_remote_l32( XPTR( old_cxy , &old_ptr->attr ) );
1471            vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( old_cxy , &old_ptr->inode ) );
1472
1473            // create a new file descriptor in same cluster as the existing one
1474            error = vfs_file_create( XPTR( old_cxy , inode_ptr ),
1475                                     attr,
1476                                     &new_xp );
1477            if( error )
1478            {
[683]1479
1480#if DEBUG_PROCESS_ERROR
1481thread_t * this  = CURRENT_THREAD;
1482uint32_t   cycle = (uint32_t)hal_get_cycles();
1483printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor / cycle %d\n",
1484__FUNCTION__, this->process->pid, this->trdid, cycle );
1485#endif
1486                return -1;
[669]1487            }
1488
1489                        // register new_xp in DST fd_array
1490                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->fd_array.array[fdid] ) , new_xp );
[1]1491                }
1492        }
1493
1494    // release lock on source process fd_array
[669]1495        remote_queuelock_release( src_lock_xp );
[1]1496
[683]1497    return 0;
1498
[669]1499}  // end process_fd_replicate()
[407]1500
[564]1501
1502////////////////////////////////////
1503bool_t process_fd_array_full( void )
1504{
[662]1505    // get extended pointer on owner process
1506    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
[564]1507
[662]1508    // get owner process cluster and local pointer
1509    process_t * owner_ptr = GET_PTR( owner_xp );
1510    cxy_t       owner_cxy = GET_CXY( owner_xp );
[564]1511
[662]1512    // get number of open file descriptors from  fd_array
1513    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
[564]1514
[662]1515        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
[564]1516}
1517
[662]1518////////////////////////////////////////////
1519void process_fd_display( xptr_t process_xp )
1520{
1521    uint32_t      fdid;
1522    xptr_t        file_xp;
1523    vfs_file_t *  file_ptr;
1524    cxy_t         file_cxy;
1525    uint32_t      file_type;
1526    xptr_t        inode_xp;
1527    vfs_inode_t * inode_ptr;
[564]1528
[662]1529    char          name[CONFIG_VFS_MAX_NAME_LENGTH];
1530
1531    // get process cluster and local pointer
1532    process_t * process_ptr = GET_PTR( process_xp );
1533    cxy_t       process_cxy = GET_CXY( process_xp );
1534
1535    // get process PID
1536    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));
1537
1538    // get pointers on owner process descriptor
1539    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
1540    process_t * owner_ptr = GET_PTR( owner_xp );
1541    cxy_t       owner_cxy = GET_CXY( owner_xp );
1542
1543    // get max fdid from owner process descriptor
1544    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1545
[683]1546    // get pointers on TXT0 chdev
1547    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1548    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1549    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1550
1551    // get extended pointer on remote TXT0 lock
1552    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1553
1554    // get TXT0 lock
1555    remote_busylock_acquire( lock_xp );
1556
1557    nolock_printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
[662]1558    pid, process_cxy, max );
1559
1560    for( fdid = 0 ; fdid <= max ; fdid++ )
1561    {
1562        // get pointers on file descriptor
1563        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1564        file_ptr = GET_PTR( file_xp );
1565        file_cxy = GET_CXY( file_xp );
1566
1567        if( file_xp != XPTR_NULL )
1568        {
1569            // get file type
1570            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type )); 
1571
[669]1572            // get file name if inode exist
1573            if( (file_type != FILE_TYPE_PIPE) && (file_type != FILE_TYPE_SOCK) )
[662]1574            {
1575                // get inode pointers
1576                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
1577                inode_xp  = XPTR( file_cxy , inode_ptr );
1578
1579                // get file name
1580                vfs_inode_get_name( inode_xp , name );
1581
[669]1582                // display relevant file descriptor info
[683]1583                nolock_printk(" - %d : type %s / ptr %x (%s)\n",
[669]1584                fdid, process_fd_type_str(file_type), file_ptr, name );
[662]1585            }
[669]1586            else    // PIPE or SOCK types
[662]1587            {
1588                // display relevant file decriptor info
[683]1589                nolock_printk(" - %d : type %s / ptr %x\n",
[669]1590                fdid , process_fd_type_str(file_type), file_ptr );
[662]1591            }
1592        }
1593        else
1594        {
[683]1595            nolock_printk(" - %d : empty slot\n", fdid );
[662]1596        }
1597    }
[683]1598
1599    // get TXT0 lock
1600    remote_busylock_acquire( lock_xp );
1601
[662]1602}   // end process_fd_display()
1603
[1]1604////////////////////////////////////////////////////////////////////////////////////
1605//  Thread related functions
1606////////////////////////////////////////////////////////////////////////////////////
1607
1608/////////////////////////////////////////////////////
1609error_t process_register_thread( process_t * process,
1610                                 thread_t  * thread,
1611                                 trdid_t   * trdid )
1612{
[472]1613    ltid_t         ltid;
[683]1614    ltid_t         ltid_min;
1615
[472]1616    bool_t         found = false;
[683]1617    lpid_t         lpid  = LPID_FROM_PID( process->pid );
[472]1618 
[564]1619// check arguments
[669]1620assert( __FUNCTION__, (process != NULL) , "process argument is NULL" );
1621assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
[1]1622
[683]1623    // get the lock protecting th_tbl for all threads but the idle thread
[564]1624    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1625
[683]1626    // compute ltid_min : 0 for an user thread / 1 for a kernel thread
1627    ltid_min = (lpid == 0) ? 1 : 0;
1628 
[583]1629    // scan th_tbl
[683]1630    for( ltid = ltid_min ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1631    {
1632        if( process->th_tbl[ltid] == NULL )
1633        {
1634            found = true;
1635            break;
1636        }
1637    }
1638
1639    if( found )
1640    {
1641        // register thread in th_tbl[]
1642        process->th_tbl[ltid] = thread;
1643        process->th_nr++;
1644
1645        // returns trdid
1646        *trdid = TRDID( local_cxy , ltid );
1647    }
1648
[583]1649    // release the lock protecting th_tbl
[564]1650    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1651
[683]1652    return (found) ? 0 : -1;
[204]1653
1654}  // end process_register_thread()
1655
[625]1656///////////////////////////////////////////////////
1657uint32_t process_remove_thread( thread_t * thread )
[1]1658{
[443]1659    uint32_t count;  // number of threads in local process descriptor
1660
[625]1661// check thread
[669]1662assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
[625]1663
[1]1664    process_t * process = thread->process;
1665
1666    // get thread local index
1667    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1668   
1669    // get the lock protecting th_tbl[]
1670    rwlock_wr_acquire( &process->th_lock );
[428]1671
[583]1672    // get number of threads
[443]1673    count = process->th_nr;
[428]1674
[564]1675// check th_nr value
[669]1676assert( __FUNCTION__, (count > 0) , "process th_nr cannot be 0" );
[443]1677
[1]1678    // remove thread from th_tbl[]
1679    process->th_tbl[ltid] = NULL;
[450]1680    process->th_nr = count-1;
[1]1681
[583]1682    // release lock protecting th_tbl
[564]1683    rwlock_wr_release( &process->th_lock );
[428]1684
[625]1685    return count;
[443]1686
[450]1687}  // end process_remove_thread()
[204]1688
[408]1689/////////////////////////////////////////////////////////
1690error_t process_make_fork( xptr_t      parent_process_xp,
1691                           xptr_t      parent_thread_xp,
1692                           pid_t     * child_pid,
1693                           thread_t ** child_thread )
[1]1694{
[408]1695    process_t * process;         // local pointer on child process descriptor
1696    thread_t  * thread;          // local pointer on child thread descriptor
1697    pid_t       new_pid;         // process identifier for child process
1698    pid_t       parent_pid;      // process identifier for parent process
1699    xptr_t      ref_xp;          // extended pointer on reference process
[428]1700    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1701    error_t     error;
[1]1702
[408]1703    // get cluster and local pointer for parent process
1704    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1705    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1706
[428]1707    // get parent process PID and extended pointer on .elf file
[564]1708    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1709    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1710
[564]1711    // get extended pointer on reference process
1712    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1713
[564]1714// check parent process is the reference process
[669]1715assert( __FUNCTION__, (parent_process_xp == ref_xp ) ,
[624]1716"parent process must be the reference process" );
[407]1717
[683]1718#if DEBUG_PROCESS_MAKE_FORK || DEBUG_PROCESS_ERROR
1719uint32_t   cycle  = (uint32_t)hal_get_cycles();
[583]1720thread_t * this  = CURRENT_THREAD;
1721trdid_t    trdid = this->trdid;
1722pid_t      pid   = this->process->pid;
[635]1723#endif
1724
1725#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[438]1726if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1727printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1728__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1729#endif
[172]1730
[408]1731    // allocate a process descriptor
[683]1732    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
[635]1733
[408]1734    if( process == NULL )
1735    {
[683]1736
1737#if DEBUG_PROCESS_ERROR
1738printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate process descriptor / cxy %x / cycle %d\n", 
1739__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1740#endif
[408]1741        return -1;
1742    }
[1]1743
[408]1744    // allocate a child PID from local cluster
[416]1745    error = cluster_pid_alloc( process , &new_pid );
[428]1746    if( error ) 
[1]1747    {
[683]1748
1749#if DEBUG_PROCESS_ERROR
1750printk("\n[ERROR] in %s : thread[%x,%x] cannot get PID / cxy %x / cycle %d\n", 
1751__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1752#endif
1753            kmem_free( process , bits_log2(sizeof(process_t)) );
[408]1754        return -1;
[1]1755    }
[408]1756
[469]1757#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1758if( DEBUG_PROCESS_MAKE_FORK < cycle )
[683]1759printk("\n[%s] thread[%x,%x] allocated child_process %x\n",
1760__FUNCTION__, pid, trdid, new_pid );
[457]1761#endif
1762
[408]1763    // initializes child process descriptor from parent process descriptor
[625]1764    error = process_reference_init( process,
1765                                    new_pid,
1766                                    parent_process_xp );
1767    if( error ) 
1768    {
[683]1769
1770#if DEBUG_PROCESS_ERROR
1771printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize child process / cxy %x / cycle %d\n", 
1772__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1773#endif
1774        cluster_pid_release( new_pid );
1775            kmem_free( process , bits_log2(sizeof(process_t)) );
[625]1776        return -1;
1777    }
[408]1778
[438]1779#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1780if( DEBUG_PROCESS_MAKE_FORK < cycle )
[683]1781printk("\n[%s] thread[%x,%x] initialized child_process %x\n",
1782__FUNCTION__, pid, trdid, new_pid );
[433]1783#endif
[408]1784
1785    // copy VMM from parent descriptor to child descriptor
1786    error = vmm_fork_copy( process,
1787                           parent_process_xp );
1788    if( error )
[101]1789    {
[683]1790
1791#if DEBUG_PROCESS_ERROR
1792printk("\n[ERROR] in %s : thread[%x,%x] cannot copy VMM to child process / cxy %x / cycle %d\n", 
1793__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1794#endif
[408]1795        cluster_pid_release( new_pid );
[683]1796            kmem_free( process , bits_log2(sizeof(process_t)) );
[408]1797        return -1;
[101]1798    }
[172]1799
[438]1800#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1801if( DEBUG_PROCESS_MAKE_FORK < cycle )
[669]1802{
[683]1803    printk("\n[%s] thread[%x,%x] copied VMM from parent to child\n",
1804    __FUNCTION__, pid, trdid );
[669]1805    hal_vmm_display( XPTR( local_cxy , process ) , true );
1806}
[433]1807#endif
[407]1808
[564]1809    // if parent_process is INIT, or if parent_process is the TXT owner,
1810    // the child_process becomes the owner of its TXT terminal
1811    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1812    {
1813        process_txt_set_ownership( XPTR( local_cxy , process ) );
1814
1815#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[626]1816if( DEBUG_PROCESS_MAKE_FORK < cycle )
[683]1817printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership\n",
1818__FUNCTION__ , pid, trdid, new_pid );
[457]1819#endif
1820
1821    }
1822
[428]1823    // update extended pointer on .elf file
1824    process->vfs_bin_xp = vfs_bin_xp;
1825
[408]1826    // create child thread descriptor from parent thread descriptor
1827    error = thread_user_fork( parent_thread_xp,
1828                              process,
1829                              &thread );
1830    if( error )
1831    {
[683]1832
1833#if DEBUG_PROCESS_ERROR
1834printk("\n[ERROR] in %s : thread[%x,%x] cannot create main thread / cxy %x / cycle %d\n", 
1835__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1836#endif
[408]1837        cluster_pid_release( new_pid );
[683]1838            kmem_free( process , bits_log2(sizeof(process_t)) );
[408]1839        return -1;
1840    }
[172]1841
[564]1842// check main thread LTID
[669]1843assert( __FUNCTION__, (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1844"main thread must have LTID == 0" );
[428]1845
[564]1846#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[438]1847if( DEBUG_PROCESS_MAKE_FORK < cycle )
[683]1848printk("\n[%s] thread[%x,%x] created main thread %x\n", 
1849__FUNCTION__, pid, trdid, thread );
[433]1850#endif
[1]1851
[635]1852    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
[629]1853    // this includes all parent process copies in all clusters
[408]1854    if( parent_process_cxy == local_cxy )   // reference is local
1855    {
1856        vmm_set_cow( parent_process_ptr );
1857    }
1858    else                                    // reference is remote
1859    {
1860        rpc_vmm_set_cow_client( parent_process_cxy,
1861                                parent_process_ptr );
1862    }
[1]1863
[625]1864    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
[433]1865    vmm_set_cow( process );
1866 
[438]1867#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1868if( DEBUG_PROCESS_MAKE_FORK < cycle )
[683]1869printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child\n",
1870__FUNCTION__, pid, trdid );
[433]1871#endif
[101]1872
[428]1873    // get extended pointers on parent children_root, children_lock and children_nr
1874    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1875    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1876    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1877
[428]1878    // register process in parent children list
[564]1879    remote_queuelock_acquire( children_lock_xp );
[428]1880        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1881        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1882    remote_queuelock_release( children_lock_xp );
[204]1883
[408]1884    // return success
1885    *child_thread = thread;
1886    *child_pid    = new_pid;
[1]1887
[438]1888#if DEBUG_PROCESS_MAKE_FORK
[433]1889cycle = (uint32_t)hal_get_cycles();
[438]1890if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1891printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1892__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1893#endif
[428]1894
[408]1895    return 0;
1896
[416]1897}   // end process_make_fork()
[408]1898
[683]1899#if DEBUG_PROCESS_MAKE_EXEC
1900
1901/////////////////////////////////////////////////////////////////////////////////////////
1902// This static debug function displays the current state of the exec_info structure
1903// embedded in the calling process descriptor.
1904//
1905// WARNING : It can be used after execution of the sys_exec function, but it cannot
1906//           be used after execution of the process_make_exec() function, because the
1907//           kernel pointers have been replaced by user pointers.
1908/////////////////////////////////////////////////////////////////////////////////////////
1909static void process_exec_info_display( bool_t args_ok,
1910                                       bool_t envs_ok )
[408]1911{
[683]1912    uint32_t   i;
1913    char     * str;    // local pointer on a string
[669]1914
[683]1915    process_t * process = CURRENT_THREAD->process;
[669]1916
[683]1917    // get relevant info from calling process descriptor
1918    pid_t       pid      = process->pid;
[669]1919
[683]1920    uint32_t    args_nr  = process->exec_info.args_nr;
1921    char     ** args     = process->exec_info.args_pointers;
[669]1922
[683]1923    uint32_t    envs_nr  = process->exec_info.envs_nr;
1924    char     ** envs     = process->exec_info.envs_pointers;
[669]1925
[683]1926    char      * path     = process->exec_info.path;
[669]1927
[683]1928    // get pointers on TXT0 chdev
1929    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1930    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1931    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[669]1932
[683]1933    // get extended pointer on remote TXT0 lock
1934    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[669]1935
[683]1936    // get TXT0 lock
1937    remote_busylock_acquire( lock_xp );
[669]1938
[683]1939    nolock_printk("\n***** exec_info for process %x in cluster %x / %s\n",
1940    pid , local_cxy , path ); 
[669]1941
[683]1942    // display arguments if required
1943    if( args_ok )
[669]1944    {
[683]1945        for( i = 0 ; i < args_nr ; i++ )
1946        {
1947            str = args[i];
1948            if( str != NULL)         // display pointer and string
1949            nolock_printk(" - &arg[%d] = %x / arg[%d] = <%s>\n", i, str, i, str );
1950            else                     // display WARNING
1951            nolock_printk(" - unexpected NULL pointer for &arg[%d]\n", i );
1952        }
1953    }
[669]1954
[683]1955    // display env variables if required
1956    if( envs_ok )
[669]1957    {
[683]1958        for( i = 0 ; i < envs_nr ; i++ )
1959        {
1960            str = envs[i];
1961            if( str != NULL)     // display pointer and string
1962            nolock_printk(" - &env[%d] = %x / env[%d] = <%s>\n", i, str, i, str );
1963            else                     // display WARNING
1964            nolock_printk(" - unexpected NULL pointer for &env[%d]\n", i );
1965        }
[669]1966    }
1967
[683]1968    // release TXT0 lock
1969    remote_busylock_release( lock_xp );
[669]1970
[683]1971}  // end process_exec_info_display()
[669]1972
[683]1973#endif // DEBUG_PROCESS_MAKE_EXEC
[669]1974
1975/////////////////////////////////
1976error_t process_make_exec( void )
1977{
1978    thread_t       * this;                    // local pointer on this thread
[457]1979    process_t      * process;                 // local pointer on this process
1980    pid_t            pid;                     // this process identifier
[669]1981    trdid_t          trdid;                   // this thread identifier
[610]1982    xptr_t           ref_xp;                  // reference process for this process
[441]1983        error_t          error;                   // value returned by called functions
[669]1984    char           * elf_path;                // path to .elf file
[457]1985    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1986    uint32_t         file_id;                 // file index in fd_array
[669]1987    vseg_t         * vseg;                    // local pointer on created vseg(s)
1988    uint32_t         n;                       // index for loops
[446]1989
[669]1990    uint32_t         args_nr;                 // actual number of args (from exec_info)
1991    intptr_t         args_base;               // args vseg base address in user space
1992    uint32_t         args_size;               // args vseg size (bytes)
1993
1994    uint32_t         envs_nr;                 // actual number of envs (from exec_info)
1995    intptr_t         envs_base;               // envs vseg base address in user space
1996    uint32_t         envs_size;               // envs vseg size (bytes)
1997
[683]1998#if DEBUG_PROCESS_MAKE_EXEC || DEBUG_PROCESS_ERROR
1999uint32_t cycle = (uint32_t)hal_get_cycles();
2000#endif
2001
[669]2002    // get calling thread, process, pid, trdid, and ref_xp
2003    this    = CURRENT_THREAD;
2004    process = this->process;
[457]2005    pid     = process->pid;
[669]2006    trdid   = this->trdid;
[610]2007    ref_xp  = process->ref_xp;
[408]2008
[669]2009        // get .elf pathname from exec_info structure
2010        elf_path      = process->exec_info.path;
[408]2011
[438]2012#if DEBUG_PROCESS_MAKE_EXEC
[635]2013if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2014printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
2015__FUNCTION__, pid, trdid, elf_path, cycle );
[433]2016#endif
[408]2017
[669]2018    // 1. open the file identified by <path>
[457]2019    file_xp = XPTR_NULL;
[564]2020    file_id = 0xFFFFFFFF;
[610]2021        error   = vfs_open( process->vfs_root_xp,
[669]2022                            elf_path,
[610]2023                        ref_xp,
[457]2024                            O_RDONLY,
2025                            0,
2026                            &file_xp,
2027                            &file_id );
2028        if( error )
2029        {
[683]2030
2031#if DEBUG_PROCESS_ERROR
2032printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s> / cycle %d\n", 
2033__FUNCTION__, pid, trdid, elf_path, cycle ); 
2034#endif
[457]2035                return -1;
2036        }
2037
[446]2038#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[635]2039if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2040printk("\n[%s] thread[%x,%x] opened file <%s>\n",
2041__FUNCTION__, pid, trdid, elf_path );
[446]2042#endif
2043
[669]2044    // 2. delete all threads other than this main thread in all clusters
[457]2045    process_sigaction( pid , DELETE_ALL_THREADS );
[446]2046
[469]2047#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[635]2048if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2049printk("\n[%s] thread[%x,%x] deleted existing threads\n",
2050__FUNCTION__, pid, trdid );
[469]2051#endif
2052
[669]2053    // 3. reset calling process VMM
[625]2054    vmm_user_reset( process );
[446]2055
[457]2056#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2057if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2058{
2059    printk("\n[%s] thread[%x,%x] completed VMM reset\n",
2060    __FUNCTION__, pid, trdid );
2061    hal_vmm_display( ref_xp , true );
2062}
[457]2063#endif
[408]2064
[683]2065    // 4. register the "args" vseg in VSL and map it in GPT, if args_nr != 0.
2066    //    As this vseg contains an array of pointers, the kernel pointers
2067    //    are replaced by user pointers in new process space.
[669]2068    args_nr = process->exec_info.args_nr;
2069
2070    if( args_nr > 0 )
[416]2071    {
[669]2072        // get args vseg base and size in user space
[683]2073        args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_ORDER;
2074        args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER;
[669]2075
2076        // create and register args vseg in VMM
2077        vseg = vmm_create_vseg( process,
2078                                VSEG_TYPE_DATA,
2079                                args_base,
2080                                args_size,
2081                                0,                 // file_offset unused for DATA type
2082                                0,                 // file_size unused for DATA type
2083                                XPTR_NULL,         // mapper_xp unused for DATA type
2084                                0 );               // cxy unused for DATA type
2085        if( vseg == NULL )
2086        {
[683]2087
2088#if DEBUG_PROCESS_ERROR
2089printk("\n[ERROR] in %s : thread[%x,%x] cannot create args vseg for <%s> / cycle %d\n", 
2090__FUNCTION__, pid, trdid, elf_path, cycle ); 
2091#endif
[669]2092                     return -1;
2093        }
2094
[438]2095#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2096if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2097{
2098    printk("\n[%s] thread[%x,%x] args vseg registered in new process VSL\n",
2099    __FUNCTION__, pid, trdid );
2100    hal_vmm_display( ref_xp , true );
2101}
[433]2102#endif
[683]2103        // map all pages for the "args" vseg
[669]2104        uint32_t fake_attr;   // required for hal_gpt_lock_pte()
2105        ppn_t    fake_ppn;    // required for hal_gpt_lock_pte()
[428]2106
[683]2107        xptr_t   base_xp = XPTR( local_cxy , process->exec_info.args_pointers );
2108        xptr_t   gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
2109        uint32_t attr    = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
2110        vpn_t    vpn     = CONFIG_VMM_UTILS_BASE;
2111        ppn_t    ppn     = ppm_base2ppn( base_xp );
[669]2112
2113        for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 
2114        {
2115            // lock the PTE
[683]2116            if (hal_gpt_lock_pte( gpt_xp , vpn + n , &fake_attr , &fake_ppn ) )
[669]2117            {
[683]2118
2119#if DEBUG_PROCESS_ERROR
2120printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn[%x] of args vseg for <%s> / cycle %d\n", 
2121__FUNCTION__, pid, trdid,  vpn + n , elf_path , cycle ); 
2122#endif
[669]2123                        return -1;
2124            }
2125
2126            // map and unlock the PTE
[683]2127            hal_gpt_set_pte( gpt_xp , vpn + n , attr , ppn + n );
2128       }
[669]2129
2130#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2131if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2132{
2133    printk("\n[%s] thread[%x,%x] args vseg mapped in new process GPT\n",
2134    __FUNCTION__, pid, trdid );
2135    hal_vmm_display( ref_xp , true );
[683]2136    process_exec_info_display( true , false );   // args & not envs
[669]2137}
2138#endif
2139
[683]2140        // build pointer on args buffer in kernel space
2141        char  ** k_args = process->exec_info.args_pointers;
[669]2142
[683]2143        // build pointer on args buffer in user space
2144        char  ** u_args = (char **)args_base;
2145
2146        // set user space pointers in kernel args buffer
[669]2147        for( n = 0 ; n < args_nr ; n++ )
2148        {
[683]2149            k_args[n] = (char *)((intptr_t)k_args[n] + (intptr_t)u_args - (intptr_t)k_args);
[669]2150        } 
[683]2151
2152#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2153if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2154printk("\n[%s] thread[%x,%x] args user pointers set in exec_info\n",
2155__FUNCTION__, pid, trdid );
2156#endif
2157
[669]2158    }
2159
[683]2160    // 5. register the "envs" vseg in VSL and map it in GPT, if envs_nr != 0.
2161    //    As this vseg contains an array of pointers, the kernel pointers
2162    //    are replaced by user pointers in new process space.
2163
[669]2164    envs_nr = process->exec_info.envs_nr;
2165
2166    if( envs_nr > 0 )
2167    {
2168        // get envs vseg base and size in user space from config
[683]2169        envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_ORDER;
2170        envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER;
[669]2171
[683]2172        // TODO (should be similar to the code for args above)
2173
2174#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2175if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2176printk("\n[%s] thread[%x,%x] envs user pointers set in exec_info\n",
2177__FUNCTION__, pid, trdid );
2178#endif
2179
[669]2180    }
2181
2182
2183    // 6. register code & data vsegs, and entry-point in process VMM,
2184    // register extended pointer on .elf file in process descriptor
[457]2185        error = elf_load_process( file_xp , process );
[683]2186
[441]2187    if( error )
[1]2188        {
[683]2189
2190#if DEBUG_PROCESS_ERROR
2191printk("\n[ERROR] in %s : thread[%x,%x] failed to access file <%s> / cycle %d\n", 
2192__FUNCTION__, pid, trdid , elf_path , cycle ); 
2193#endif
[408]2194        return -1;
[1]2195        }
2196
[438]2197#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2198if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2199{
2200    printk("\n[%s] thread[%x,%x] registered code/data vsegs / entry %x\n",
2201    __FUNCTION__, pid, trdid, process->vmm.entry_point );
2202    hal_vmm_display( ref_xp , true );
2203}
[433]2204#endif
[1]2205
[669]2206    // 7. allocate an user stack vseg for main thread
2207    vseg = vmm_create_vseg( process,
2208                            VSEG_TYPE_STACK,
2209                            LTID_FROM_TRDID( trdid ),
2210                            0,                 // length unused
2211                            0,                 // file_offset unused
2212                            0,                 // file_size unused
2213                            XPTR_NULL,         // mapper_xp unused
2214                            local_cxy );
2215    if( vseg == NULL )
2216    {
[683]2217
2218#if DEBUG_PROCESS_ERROR
2219printk("\n[ERROR] in %s : thread[%x,%x] failed to set u_stack vseg for <%s> / cycle %d\n", 
2220__FUNCTION__, pid, trdid , elf_path , cycle ); 
2221#endif
[669]2222                return -1;
2223    }
2224
2225#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2226if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2227{
2228    printk("\n[%s] thread[%x,%x] registered stack vseg\n",
2229    __FUNCTION__, pid, trdid );
2230    hal_vmm_display( ref_xp , true );
2231}
2232#endif
2233
2234    // update user stack in thread descriptor
2235    this->user_stack_vseg = vseg;
2236
2237    // 8. update the main thread descriptor ... and jumps (one way) to user code
2238    thread_user_exec( args_nr , args_base );
2239
[457]2240    if( error )
2241    {
[683]2242
2243#if DEBUG_PROCESS_ERROR
2244printk("\n[ERROR] in %s : thread[%x,%x] failed to set main thread for <%s> / cycle %d\n", 
2245__FUNCTION__, pid, trdid , elf_path , cycle ); 
2246#endif
[408]2247        return -1;
[457]2248    }
[1]2249
[683]2250    // should not be reached, avoid a warning
[409]2251        return 0;
2252
2253}  // end process_make_exec()
2254
[457]2255
[623]2256////////////////////////////////////////////////
2257void process_zero_create( process_t   * process,
2258                          boot_info_t * info )
[428]2259{
[580]2260    error_t error;
2261    pid_t   pid;
[428]2262
[438]2263#if DEBUG_PROCESS_ZERO_CREATE
[433]2264uint32_t cycle = (uint32_t)hal_get_cycles();
[438]2265if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]2266printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]2267__FUNCTION__, local_cxy, cycle );
[433]2268#endif
[428]2269
[624]2270    // get pointer on VMM
2271    vmm_t * vmm = &process->vmm;
2272
[580]2273    // get PID from local cluster manager for this kernel process
2274    error = cluster_pid_alloc( process , &pid );
2275
2276    if( error || (LPID_FROM_PID( pid ) != 0) )
2277    {
2278        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
2279        __FUNCTION__ , local_cxy, pid );
2280        hal_core_sleep();
2281    }
2282
[635]2283#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2284if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2285printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
2286#endif
2287
[428]2288    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]2289    // the kernel process_zero is its own parent_process,
2290    // reference_process, and owner_process, and cannot be killed...
2291    process->pid        = pid;
[433]2292    process->ref_xp     = XPTR( local_cxy , process );
[443]2293    process->owner_xp   = XPTR( local_cxy , process );
[580]2294    process->parent_xp  = XPTR( local_cxy , process );
[433]2295    process->term_state = 0;
[428]2296
[635]2297    // initialize VSL as empty
[624]2298    vmm->vsegs_nr = 0;
2299        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[623]2300
[635]2301#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2302if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2303printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
2304#endif
2305
2306    // initialize GPT as empty
[624]2307    error = hal_gpt_create( &vmm->gpt );
[635]2308
[624]2309    if( error ) 
2310    {
2311        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
2312        hal_core_sleep();
2313    }
2314
[635]2315#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2316if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2317printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
2318#endif
2319
[625]2320    // initialize VSL and GPT locks
[629]2321    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[624]2322   
2323    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2324    error = hal_vmm_kernel_init( info );
[635]2325
[624]2326    if( error ) 
2327    {
2328        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2329        hal_core_sleep();
2330    }
2331
[635]2332#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2333if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2334printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
[683]2335hal_vmm_display( XPTR( local_cxy , process ) , true ); 
[635]2336#endif
2337
[564]2338    // reset th_tbl[] array and associated fields
[428]2339    uint32_t i;
[564]2340    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]2341        {
2342        process->th_tbl[i] = NULL;
2343    }
2344    process->th_nr  = 0;
[564]2345    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]2346
[635]2347#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2348if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2349printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy );
2350#endif
[564]2351
[428]2352    // reset children list as empty
2353    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2354    process->children_nr = 0;
[564]2355    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2356                           LOCK_PROCESS_CHILDREN );
[428]2357
[635]2358#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2359if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2360printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy );
2361#endif
2362
[580]2363    // register kernel process in cluster manager local_list
2364    cluster_process_local_link( process );
2365   
[428]2366        hal_fence();
2367
[438]2368#if DEBUG_PROCESS_ZERO_CREATE
[433]2369cycle = (uint32_t)hal_get_cycles();
[438]2370if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]2371printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]2372__FUNCTION__, local_cxy, cycle );
[433]2373#endif
[428]2374
[610]2375}  // end process_zero_create()
[428]2376
/////////////////////////////////////////////////////////////////////////////////////////
// This function creates the user "init" process, child of the local kernel
// process_zero. It allocates and initializes the process descriptor (PID is forced
// to 1), opens the .elf pathname defined by CONFIG_PROCESS_INIT_PATH, registers the
// "code" and "data" vsegs in the process VMM, links the new process in the
// process_zero children list, and finally creates and unblocks the main thread on
// a core selected in the local cluster. Any failure is fatal: it prints a PANIC
// message and stops the calling core.
/////////////////////////////////////////////////////////////////////////////////////////
void process_init_create( void )
{
    process_t      * process;       // local pointer on process descriptor
    pid_t            pid;           // process_init identifier
    thread_t       * thread;        // local pointer on main thread
    pthread_attr_t   attr;          // main thread attributes
    lid_t            lid;           // selected core local index for main thread
    xptr_t           file_xp;       // extended pointer on .elf file descriptor
    uint32_t         file_id;       // file index in fd_array
    error_t          error;

#if DEBUG_PROCESS_INIT_CREATE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    // allocates memory for process descriptor from local cluster
    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
    if( process == NULL )
    {
        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // set the CWD and VFS_ROOT fields in process descriptor
    // both initialized from the kernel process_zero VFS root
    process->cwd_xp      = process_zero.vfs_root_xp;
    process->vfs_root_xp = process_zero.vfs_root_xp;

    // get PID from local cluster / init process PID must be 1
    error = cluster_pid_alloc( process , &pid );
    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
        hal_core_sleep();
    }
    if( pid != 1 ) 
    {
        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // initialize process descriptor / parent is local process_zero
    error = process_reference_init( process,
                                    pid,
                                    XPTR( local_cxy , &process_zero ) ); 
    if( error )
    {
        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // open the file identified by CONFIG_PROCESS_INIT_PATH
    file_xp = XPTR_NULL;
    file_id = -1;
        error   = vfs_open( process->vfs_root_xp,
                            CONFIG_PROCESS_INIT_PATH,
                        XPTR( local_cxy , process ),
                            O_RDONLY,
                            0,
                            &file_xp,
                            &file_id );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot open file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // register "code" and "data" vsegs as well as entry-point
    // in process VMM, using information contained in the elf file.
        error = elf_load_process( file_xp , process );

    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot access file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }


#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
{
    printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
    __FUNCTION__, this->process->pid, this->trdid );
    hal_vmm_display( XPTR( local_cxy , process ) , true );
}
#endif

    // get extended pointers on process_zero children_root, children_lock
    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );

    // take lock protecting kernel process children list
    remote_queuelock_acquire( children_lock_xp );

    // register process INIT in parent local process_zero
        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
        hal_atomic_add( &process_zero.children_nr , 1 );

    // release lock protecting kernel process children list
    remote_queuelock_release( children_lock_xp );

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] registered init process in parent\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // select a core in local cluster to execute the main thread
    lid  = cluster_select_local_core( local_cxy );

    // initialize pthread attributes for main thread
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor
    // the thread entry point is the one registered by elf_load_process()
        error = thread_user_create( pid,
                                (void *)process->vmm.entry_point,
                                NULL,
                                &attr,
                                &thread );

    if( error )
    {
        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
        hal_core_sleep();
    }
    if( thread->trdid != 0 )
    {
        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] created main thread\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // activate thread
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    hal_fence();

#if DEBUG_PROCESS_INIT_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

}  // end process_init_create()
2547
/////////////////////////////////////////////////////////////////////////////////////////
// This function builds in <buffer> (of <size> characters, at least 80) one printable
// line describing the process identified by the <process_xp> extended pointer:
// PID, attached TXT terminal name, foreground (FG) / background (BG) status,
// process descriptor local pointer, PPID, termination state, number of threads,
// and .elf file name. For the kernel process (lpid == 0), the TXT and .elf names
// are hardcoded, as they are not registered in the process descriptor.
// It returns the string length as reported by snprintk(); when snprintk() reports
// an overflow (negative length), the buffer is overwritten with an error message.
/////////////////////////////////////////////////////////////////////////////////////////
uint32_t process_build_string( xptr_t   process_xp,
                               char   * buffer,
                               uint32_t size )
{
    int32_t       length;          // actual length of the string

    process_t   * process_ptr;     // process descriptor local pointer
    cxy_t         process_cxy;     // process descriptor cluster identifier

    xptr_t        parent_xp;       // extended pointer on parent process
    process_t   * parent_ptr;      // parent process local pointer
    cxy_t         parent_cxy;      // parent process cluster identifier

    xptr_t        owner_xp;        // extended pointer on owner process
    process_t   * owner_ptr;       // owner process local pointer
    cxy_t         owner_cxy;       // owner process cluster identifier

    pid_t         pid;             // process identifier
    pid_t         ppid;            // parent process identifier
    lpid_t        lpid;            // local process identifier
    uint32_t      state;           // terminaison state
    uint32_t      th_nr;           // number of threads

    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
    chdev_t     * txt_chdev_ptr;   // TXT_RX chdev local pointer
    cxy_t         txt_chdev_cxy;   // TXT_RX chdev cluster identifier
    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process

    xptr_t        elf_file_xp;     // extended pointer on .elf file
    cxy_t         elf_file_cxy;    // .elf file cluster identifier
    vfs_file_t  * elf_file_ptr;    // .elf file local pointer
    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode

    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];

assert( __FUNCTION__ , (size >= 80 ) , "buffer size too small" );

    // get cluster and local pointer on process
    process_ptr = GET_PTR( process_xp );
    process_cxy = GET_CXY( process_xp );

    // get process PID, LPID, and state
    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    lpid  = LPID_FROM_PID( pid );
    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );

    // get process PPID
    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );
    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

    // get number of threads
    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );

    // get pointers on owner process descriptor
    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
    owner_cxy = GET_CXY( owner_xp );
    owner_ptr = GET_PTR( owner_xp );

    // get process TXT name and .elf name
    if( lpid )                                   // user process
    {
        // get extended pointer on file descriptor associated to TXT_RX
        // (fd_array[0] is the stdin pseudo file in the owner process)
        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );

assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
"process must be attached to one TXT terminal" ); 

        // get TXT_RX chdev pointers
        txt_chdev_xp  = chdev_from_file( txt_file_xp );
        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
        txt_chdev_ptr = GET_PTR( txt_chdev_xp );

        // get TXT_RX name and ownership
        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
   
        // get TXT_owner process (defines FG/BG status below)
        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
        // get process .elf name from the inode of the binary file
        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
        elf_file_cxy  = GET_CXY( elf_file_xp );
        elf_file_ptr  = GET_PTR( elf_file_xp );
        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
    }
    else                                         // kernel process_zero
    {
        // TXT name and .elf name are not registered in kernel process
        strcpy( txt_name , "txt0_rx" );
        txt_owner_xp = process_xp; 
        strcpy( elf_name , "kernel.elf" );
    }

    // display process info / (FG) when process owns its TXT terminal
    if( txt_owner_xp == process_xp )
    {
        length = snprintk( buffer, size,
        "PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }
    else
    {
        length = snprintk( buffer, size,
        "PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }

    // check length / snprintk() returns a negative value on overflow
    if( (length < 0) )
    {
        length = snprintk( buffer , size , 
        "buffer too small for process %x in cluster %x", pid , process_cxy );
    }

    return length; 

}  // end process_build_string()
2671
2672/////////////////////////////////////////
2673void process_display( xptr_t process_xp )
2674{
2675    char  buffer[CONFIG_PROCESS_DISPLAY_BUF_SIZE];
2676
2677    // build the string to be displayed
2678    process_build_string( process_xp,
2679                          buffer,
2680                          CONFIG_PROCESS_DISPLAY_BUF_SIZE ); 
2681    // display the string
2682    nolock_puts( buffer );
2683
[428]2684}  // end process_display()
2685
2686
2687////////////////////////////////////////////////////////////////////////////////////////
2688//     Terminals related functions
2689////////////////////////////////////////////////////////////////////////////////////////
2690
[581]2691//////////////////////////////////
[485]2692uint32_t process_txt_alloc( void )
[428]2693{
2694    uint32_t  index;       // TXT terminal index
2695    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2696    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2697    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2698    xptr_t    root_xp;     // extended pointer on owner field in chdev
2699
2700    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2701    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2702    {
2703        // get pointers on TXT_RX[index]
2704        chdev_xp  = chdev_dir.txt_rx[index];
2705        chdev_cxy = GET_CXY( chdev_xp );
2706        chdev_ptr = GET_PTR( chdev_xp );
2707
2708        // get extended pointer on root of attached process
2709        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2710
2711        // return free TXT index if found
2712        if( xlist_is_empty( root_xp ) ) return index; 
2713    }
2714
[669]2715    assert( __FUNCTION__, false , "no free TXT terminal found" );
[428]2716
2717    return -1;
2718
2719} // end process_txt_alloc()
2720
2721/////////////////////////////////////////////
[669]2722void process_txt_attach( xptr_t   process_xp,
2723                         uint32_t txt_id )
[428]2724{
2725    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2726    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2727    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2728    xptr_t      root_xp;      // extended pointer on list root in chdev
2729    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2730
[669]2731    process_t * process_ptr = GET_PTR(process_xp );
2732    cxy_t       process_cxy = GET_CXY(process_xp );
2733
[564]2734// check process is in owner cluster
[669]2735assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
[564]2736"process descriptor not in owner cluster" );
[428]2737
[564]2738// check terminal index
[669]2739assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[564]2740"illegal TXT terminal index" );
[428]2741
2742    // get pointers on TXT_RX[txt_id] chdev
2743    chdev_xp  = chdev_dir.txt_rx[txt_id];
2744    chdev_cxy = GET_CXY( chdev_xp );
2745    chdev_ptr = GET_PTR( chdev_xp );
2746
2747    // get extended pointer on root & lock of attached process list
2748    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2749    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2750
[564]2751    // get lock protecting list of processes attached to TXT
2752    remote_busylock_acquire( lock_xp );
2753
[669]2754    // insert owner process in list of attached processes to same TXT
2755    xlist_add_last( root_xp , XPTR( process_cxy , &process_ptr->txt_list ) );
[428]2756
[564]2757    // release lock protecting list of processes attached to TXT
2758    remote_busylock_release( lock_xp );
2759
[446]2760#if DEBUG_PROCESS_TXT
[610]2761thread_t * this = CURRENT_THREAD;
[457]2762uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2763if( DEBUG_PROCESS_TXT < cycle )
[610]2764printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
[669]2765__FUNCTION__, this->process->pid, this->trdid,
2766hal_remote_l32( XPTR( process_cxy , &process_ptr->pid, txt_id , cycle );
[433]2767#endif
[428]2768
2769} // end process_txt_attach()
2770
[436]2771/////////////////////////////////////////////
2772void process_txt_detach( xptr_t  process_xp )
[428]2773{
[436]2774    process_t * process_ptr;  // local pointer on process in owner cluster
2775    cxy_t       process_cxy;  // process owner cluster
2776    pid_t       process_pid;  // process identifier
2777    xptr_t      file_xp;      // extended pointer on stdin file
[428]2778    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2779    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2780    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2781    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2782
[436]2783    // get process cluster, local pointer, and PID
2784    process_cxy = GET_CXY( process_xp );
2785    process_ptr = GET_PTR( process_xp );
[564]2786    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2787
[564]2788// check process descriptor in owner cluster
[669]2789assert( __FUNCTION__, (CXY_FROM_PID( process_pid ) == process_cxy ) ,
[564]2790"process descriptor not in owner cluster" );
[436]2791
2792    // release TXT ownership (does nothing if not TXT owner)
2793    process_txt_transfer_ownership( process_xp );
[428]2794
[625]2795    // get extended pointer on process stdin pseudo file
[564]2796    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]2797
2798    // get pointers on TXT_RX chdev
2799    chdev_xp  = chdev_from_file( file_xp );
[428]2800    chdev_cxy = GET_CXY( chdev_xp );
2801    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2802
[436]2803    // get extended pointer on lock protecting attached process list
[428]2804    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2805
[564]2806    // get lock protecting list of processes attached to TXT
2807    remote_busylock_acquire( lock_xp );
2808
[428]2809    // unlink process from attached process list
[436]2810    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2811
[564]2812    // release lock protecting list of processes attached to TXT
2813    remote_busylock_release( lock_xp );
2814
[446]2815#if DEBUG_PROCESS_TXT
[610]2816thread_t * this = CURRENT_THREAD;
[457]2817uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2818uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]2819if( DEBUG_PROCESS_TXT < cycle )
[625]2820printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
[610]2821__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]2822#endif
[428]2823
2824} // end process_txt_detach()
2825
2826///////////////////////////////////////////////////
[669]2827uint32_t process_txt_get_index( xptr_t process_xp )
2828{
2829
2830    // get target process cluster and local pointer
2831    process_t * process_ptr = GET_PTR( process_xp );
2832    cxy_t       process_cxy = GET_CXY( process_xp );
2833
2834assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp))),
2835"process descriptor not in owner cluster" );
2836
2837    // get extended pointer on STDIN pseudo file in owner process descriptor
2838    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0]));
2839
2840assert( __FUNCTION__, (file_xp != XPTR_NULL),
2841"STDIN pseudo-file undefined in fd_array for process %x\n",
2842hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2843
2844    // get extended pointer on TXT chdev
2845    xptr_t chdev_xp = chdev_from_file( file_xp );
2846 
2847assert( __FUNCTION__, (chdev_xp != XPTR_NULL),
2848"chdev undefined for STDIN pseudo-file of process %x\n",
2849hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2850
2851    // get cluster and local pointer on chdev
2852   cxy_t     chdev_cxy = GET_CXY( chdev_xp );
2853   chdev_t * chdev_ptr = GET_PTR( chdev_xp );
2854 
2855   // get parent TXT terminal index
2856   return hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2857
2858}  // end process_txt_get_index()
2859
2860///////////////////////////////////////////////////
[428]2861void process_txt_set_ownership( xptr_t process_xp )
2862{
2863    process_t * process_ptr;
2864    cxy_t       process_cxy;
2865    xptr_t      file_xp;
2866    xptr_t      txt_xp;     
2867    chdev_t   * txt_ptr;
2868    cxy_t       txt_cxy;
2869
[436]2870    // get pointers on process in owner cluster
[428]2871    process_cxy = GET_CXY( process_xp );
[435]2872    process_ptr = GET_PTR( process_xp );
[436]2873
2874    // check owner cluster
[669]2875    assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
[624]2876    "process descriptor not in owner cluster" );
[436]2877
[428]2878    // get extended pointer on stdin pseudo file
[564]2879    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2880
2881    // get pointers on TXT chdev
2882    txt_xp  = chdev_from_file( file_xp );
2883    txt_cxy = GET_CXY( txt_xp );
[435]2884    txt_ptr = GET_PTR( txt_xp );
[428]2885
2886    // set owner field in TXT chdev
[564]2887    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2888
[446]2889#if DEBUG_PROCESS_TXT
[610]2890thread_t * this = CURRENT_THREAD;
[457]2891uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2892uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2893if( DEBUG_PROCESS_TXT < cycle )
[669]2894printk("\n[%s] thread[%x,%x] give TXT%d ownership to process / cycle %d\n",
2895__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2896#endif
2897
[428]2898}  // end process_txt_set ownership()
2899
/////////////////////////////////////////////////////////////////////////////////////////
// This function releases the TXT terminal ownership hold by the process identified
// by <process_xp>, when this process is actually the current TXT owner.
// It does nothing if the target process is not the owner, or for terminal TXT0
// (reserved for the kernel). Otherwise, the new owner is selected among the
// processes attached to the same terminal, under the chdev busylock:
// - if the target process is not the KSH (its PPID != 1), ownership is transfered
//   to the KSH process found in the attached list (it is a fatal error if no KSH
//   process is attached to the terminal);
// - if the target process is the KSH itself, ownership is transfered to any other
//   attached process, or set to XPTR_NULL when no other process is attached.
// The <process_xp> argument must identify the process descriptor in the owner
// cluster (checked by assert).
/////////////////////////////////////////////////////////////////////////////////////////
void process_txt_transfer_ownership( xptr_t process_xp )
{
    process_t * process_ptr;     // local pointer on process releasing ownership
    cxy_t       process_cxy;     // process cluster
    pid_t       process_pid;     // process identifier
    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
    uint32_t    txt_id;          // TXT_RX channel
    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    xptr_t      root_xp;         // extended pointer on root of attached process list
    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;         // iterator for xlist
    xptr_t      current_xp;      // extended pointer on current process
    bool_t      found;           // true when a new owner has been selected

#if DEBUG_PROCESS_TXT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle;
#endif

    // get pointers on target process
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check owner cluster
assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
"process descriptor not in owner cluster" );

    // get extended pointer on stdin pseudo file
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get relevant infos from chdev descriptor
    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );

    // transfer ownership only if target process is the TXT owner
    // and the terminal is not TXT0 (reserved for kernel)
    if( (owner_xp == process_xp) && (txt_id > 0) ) 
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find KSH process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

// It must exist a KSH process for each user TXT channel
assert( __FUNCTION__, (found == true), "KSH process not found for TXT%d", txt_id );

        }
        else                                           // target process is KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find another process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( current_xp != process_xp )            // current is not KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle  = (uint32_t)hal_get_cycles();
cxy_t       current_cxy = GET_CXY( current_xp );
process_t * current_ptr = GET_PTR( current_xp );
uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // no more owner for TXT if no other process found
            if( found == false )
            {
                // set owner field in TXT chdev
                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
            }
        }
    }
    else
    {

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
#endif

    }

}  // end process_txt_transfer_ownership()
[428]3048
3049
[564]3050////////////////////////////////////////////////
3051bool_t process_txt_is_owner( xptr_t process_xp )
[457]3052{
3053    // get local pointer and cluster of process in owner cluster
3054    cxy_t       process_cxy = GET_CXY( process_xp );
3055    process_t * process_ptr = GET_PTR( process_xp );
3056
[564]3057// check calling thread execute in target process owner cluster
3058pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[669]3059assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]3060"process descriptor not in owner cluster" );
[457]3061
3062    // get extended pointer on stdin pseudo file
[564]3063    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]3064
3065    // get pointers on TXT chdev
3066    xptr_t    txt_xp  = chdev_from_file( file_xp );
3067    cxy_t     txt_cxy = GET_CXY( txt_xp );
3068    chdev_t * txt_ptr = GET_PTR( txt_xp );
3069
3070    // get extended pointer on TXT_RX owner process
[564]3071    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]3072
3073    return (process_xp == owner_xp);
3074
3075}   // end process_txt_is_owner()
3076
[436]3077////////////////////////////////////////////////     
3078xptr_t process_txt_get_owner( uint32_t channel )
[435]3079{
3080    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
3081    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
3082    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
3083
[564]3084    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]3085
[457]3086}  // end process_txt_get_owner()
3087
[435]3088///////////////////////////////////////////
3089void process_txt_display( uint32_t txt_id )
3090{
3091    xptr_t      chdev_xp;
3092    cxy_t       chdev_cxy;
3093    chdev_t   * chdev_ptr;
3094    xptr_t      root_xp;
3095    xptr_t      lock_xp;
3096    xptr_t      current_xp;
3097    xptr_t      iter_xp;
[443]3098    cxy_t       txt0_cxy;
3099    chdev_t   * txt0_ptr;
3100    xptr_t      txt0_xp;
3101    xptr_t      txt0_lock_xp;
3102   
[669]3103    assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]3104    "illegal TXT terminal index" );
[435]3105
[443]3106    // get pointers on TXT0 chdev
3107    txt0_xp  = chdev_dir.txt_tx[0];
3108    txt0_cxy = GET_CXY( txt0_xp );
3109    txt0_ptr = GET_PTR( txt0_xp );
3110
3111    // get extended pointer on TXT0 lock
3112    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
3113
[435]3114    // get pointers on TXT_RX[txt_id] chdev
3115    chdev_xp  = chdev_dir.txt_rx[txt_id];
3116    chdev_cxy = GET_CXY( chdev_xp );
3117    chdev_ptr = GET_PTR( chdev_xp );
3118
3119    // get extended pointer on root & lock of attached process list
3120    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
3121    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
3122
[443]3123    // get lock on attached process list
[564]3124    remote_busylock_acquire( lock_xp );
[443]3125
3126    // get TXT0 lock in busy waiting mode
[564]3127    remote_busylock_acquire( txt0_lock_xp );
[443]3128
[435]3129    // display header
[443]3130    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
3131    txt_id , (uint32_t)hal_get_cycles() );
[435]3132
[436]3133    // scan attached process list
[435]3134    XLIST_FOREACH( root_xp , iter_xp )
3135    {
3136        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
3137        process_display( current_xp );
3138    }
3139
[443]3140    // release TXT0 lock in busy waiting mode
[564]3141    remote_busylock_release( txt0_lock_xp );
[443]3142
3143    // release lock on attached process list
[564]3144    remote_busylock_release( lock_xp );
[435]3145
3146}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.