source: trunk/kernel/kern/process.c @ 673

Last change on this file since 673 was 669, checked in by alain, 4 years ago

1) Introduce up to 4 command lines arguments in the KSH "load" command.
These arguments are transferred to the user process through the
argc/argv mechanism, using the user space "args" vseg.

2) Introduce the named and anonymous "pipes", for inter-process communication
through the pipe() and mkfifo() syscalls.

3) Introduce the "chat" application to validate the two above mechanisms.

4) Improve printk() and assert() functions in printk.c.

File size: 107.0 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[657]6 *          Alain Greiner (2016,2017,2018,2019,2020)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[669]43#include <ksocket.h>
[1]44#include <list.h>
[407]45#include <string.h>
[1]46#include <scheduler.h>
[564]47#include <busylock.h>
48#include <queuelock.h>
49#include <remote_queuelock.h>
50#include <rwlock.h>
51#include <remote_rwlock.h>
[1]52#include <dqdt.h>
53#include <cluster.h>
54#include <ppm.h>
55#include <boot_info.h>
56#include <process.h>
57#include <elf.h>
[23]58#include <syscalls.h>
[435]59#include <shared_syscalls.h>
[1]60
61//////////////////////////////////////////////////////////////////////////////////////////
62// Extern global variables
63//////////////////////////////////////////////////////////////////////////////////////////
64
[428]65extern process_t           process_zero;     // allocated in kernel_init.c
66extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]67
68//////////////////////////////////////////////////////////////////////////////////////////
69// Process initialisation related functions
70//////////////////////////////////////////////////////////////////////////////////////////
71
[583]72/////////////////////////////////
[503]73process_t * process_alloc( void )
[1]74{
[669]75
76assert( __FUNCTION__, (sizeof(process_t) < CONFIG_PPM_PAGE_SIZE),
77"process descriptor exceeds 1 page" );
78
[635]79        kmem_req_t req;
[1]80
[669]81    req.type  = KMEM_PPM;
82        req.order = 0;
83        req.flags = AF_KERNEL | AF_ZERO;
[635]84    return kmem_alloc( &req );
[1]85}
86
87////////////////////////////////////////
88void process_free( process_t * process )
89{
90    kmem_req_t  req;
91
[669]92        req.type = KMEM_PPM;
[1]93        req.ptr  = process;
94        kmem_free( &req );
95}
96
[625]97////////////////////////////////////////////////////
98error_t process_reference_init( process_t * process,
99                                pid_t       pid,
100                                xptr_t      parent_xp )
[1]101{
[625]102    error_t     error;
[610]103    xptr_t      process_xp;
[428]104    cxy_t       parent_cxy;
105    process_t * parent_ptr;
[407]106    xptr_t      stdin_xp;
107    xptr_t      stdout_xp;
108    xptr_t      stderr_xp;
109    uint32_t    stdin_id;
110    uint32_t    stdout_id;
111    uint32_t    stderr_id;
[428]112    uint32_t    txt_id;
113    char        rx_path[40];
114    char        tx_path[40];
115    pid_t       parent_pid;
[625]116    vmm_t     * vmm;
[1]117
[610]118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
[625]121    // get pointer on process vmm
122    vmm = &process->vmm;
123
[428]124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
[435]126    parent_ptr = GET_PTR( parent_xp );
[204]127
[457]128    // get parent_pid
[564]129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]130
[438]131#if DEBUG_PROCESS_REFERENCE_INIT
[610]132thread_t * this = CURRENT_THREAD;
[433]133uint32_t cycle = (uint32_t)hal_get_cycles();
[610]134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]137#endif
[428]138
[610]139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
[443]142    process->owner_xp   = XPTR( local_cxy , process );
[433]143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
[428]145
[610]146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
[625]150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]153
[625]154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
[635]168    // initialize VSL lock
[625]169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
[635]171    // register kernel vsegs in user process VMM as required by the architecture
[625]172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[635]181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
[625]182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
[415]194 
[438]195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]196cycle = (uint32_t)hal_get_cycles();
[610]197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
[433]200#endif
[1]201
[409]202    // initialize fd_array as empty
[408]203    process_fd_init( process );
[1]204
[428]205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]207    {
[581]208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
[428]211
[457]212        // attach process to TXT
[669]213        process_txt_attach( process_xp , txt_id ); 
[428]214
[457]215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
[610]217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]220#endif
[428]221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
[669]222        snprintk( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintk( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
[428]224
225        // create stdin pseudo file         
[610]226        error = vfs_open(  process->vfs_root_xp,
[428]227                           rx_path,
[610]228                           process_xp,
[408]229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
[625]233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
[1]238
[669]239assert( __FUNCTION__, (stdin_id == 0) , "stdin index must be 0" );
[428]240
[440]241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
[610]243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]246#endif
247
[428]248        // create stdout pseudo file         
[610]249        error = vfs_open(  process->vfs_root_xp,
[428]250                           tx_path,
[610]251                           process_xp,
[408]252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
[625]256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
[1]261
[669]262assert( __FUNCTION__, (stdout_id == 1) , "stdout index must be 1" );
[428]263
[440]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]269#endif
270
[428]271        // create stderr pseudo file         
[610]272        error = vfs_open(  process->vfs_root_xp,
[428]273                           tx_path,
[610]274                           process_xp,
[408]275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
[625]279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
[428]284
[669]285assert( __FUNCTION__, (stderr_id == 2) , "stderr index must be 2" );
[428]286
[440]287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
[610]289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]292#endif
293
[408]294    }
[428]295    else                                            // normal user process
[408]296    {
[669]297        // get parent process TXT index
298        txt_id = process_txt_get_index( parent_xp );
[440]299
[669]300        // attach child process to same TXT terminal as parent
301        process_txt_attach( process_xp , txt_id ); 
[407]302
[669]303        // recreate all open files from parent process fd_array to child process fd_array
304        process_fd_replicate( process_xp , parent_xp );
[408]305    }
[407]306
[610]307    // initialize lock protecting CWD changes
[669]308    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]309
[438]310#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]311cycle = (uint32_t)hal_get_cycles();
[610]312if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
313printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
314__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]315#endif
[407]316
[408]317    // reset children list root
318    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
319    process->children_nr     = 0;
[625]320    remote_queuelock_init( XPTR( local_cxy,
321                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]322
[611]323    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]324    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
325    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
326    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
327    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]328    remote_queuelock_init( XPTR( local_cxy , 
329                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]330
[611]331    // reset open directories root and lock
332    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]333    remote_queuelock_init( XPTR( local_cxy , 
334                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]335
[408]336    // register new process in the local cluster manager pref_tbl[]
337    lpid_t lpid = LPID_FROM_PID( pid );
338    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]339
[408]340    // register new process descriptor in local cluster manager local_list
341    cluster_process_local_link( process );
[407]342
[408]343    // register new process descriptor in local cluster manager copies_list
344    cluster_process_copies_link( process );
[172]345
[564]346    // initialize th_tbl[] array and associated threads
[1]347    uint32_t i;
[564]348
349    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]350        {
351        process->th_tbl[i] = NULL;
352    }
353    process->th_nr  = 0;
[564]354    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]355
[124]356        hal_fence();
[1]357
[438]358#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]359cycle = (uint32_t)hal_get_cycles();
[610]360if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
361printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
362__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]363#endif
[101]364
[635]365#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
366hal_vmm_display( parent_xp , false );
367hal_vmm_display( XPTR( local_cxy , process ) , false );
368#endif
369
[625]370    return 0;
371
[428]372}  // process_reference_init()
[204]373
[1]374/////////////////////////////////////////////////////
375error_t process_copy_init( process_t * local_process,
376                           xptr_t      reference_process_xp )
377{
[625]378    error_t   error;
379    vmm_t   * vmm;
[415]380
[23]381    // get reference process cluster and local pointer
382    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]383    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]384
[625]385    // get pointer on process vmm
386    vmm = &local_process->vmm;
387
[428]388    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]389    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
390    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]391    local_process->ref_xp     = reference_process_xp;
[443]392    local_process->owner_xp   = reference_process_xp;
[433]393    local_process->term_state = 0;
[407]394
[564]395#if DEBUG_PROCESS_COPY_INIT
[610]396thread_t * this = CURRENT_THREAD; 
[433]397uint32_t cycle = (uint32_t)hal_get_cycles();
[610]398if( DEBUG_PROCESS_COPY_INIT < cycle )
399printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
400__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]401#endif
[407]402
[564]403// check user process
[669]404assert( __FUNCTION__, (local_process->pid != 0), "LPID cannot be 0" );
[564]405
[625]406    // initialize VSL as empty
407    vmm->vsegs_nr = 0;
408        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]409
[625]410    // create an empty GPT as required by the architecture
411    error = hal_gpt_create( &vmm->gpt );
412    if( error ) 
413    {
414        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
415        return -1;
416    }
417
418    // initialize GPT and VSL locks
419        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
420
421    // register kernel vsegs in VMM as required by the architecture
422    error = hal_vmm_kernel_update( local_process );
423    if( error ) 
424    {
425        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
426        return -1;
427    }
428
429    // create "args" and "envs" vsegs
430    // create "stacks" and "mmap" vsegs allocators
431    // initialize locks protecting GPT and VSL
432    error = vmm_user_init( local_process );
433    if( error ) 
434    {
435        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
436        return -1;
437    }
438 
439#if (DEBUG_PROCESS_COPY_INIT & 1)
440cycle = (uint32_t)hal_get_cycles();
441if( DEBUG_PROCESS_COPY_INIT < cycle )
442printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
443__FUNCTION__, parent_pid, this->trdid, pid, cycle );
444#endif
445
446    // set process file descriptors array
[23]447        process_fd_init( local_process );
[1]448
[625]449    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]450    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
451    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]452    local_process->cwd_xp      = XPTR_NULL;
[1]453
454    // reset children list root (not used in a process descriptor copy)
455    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]456    local_process->children_nr   = 0;
[564]457    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
458                           LOCK_PROCESS_CHILDREN );
[1]459
[428]460    // reset children_list (not used in a process descriptor copy)
461    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]462
463    // reset semaphores list root (not used in a process descriptor copy)
464    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]465    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
466    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
467    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]468
[564]469    // initialize th_tbl[] array and associated fields
[1]470    uint32_t i;
[564]471    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]472        {
473        local_process->th_tbl[i] = NULL;
474    }
475    local_process->th_nr  = 0;
[564]476    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]477
478    // register new process descriptor in local cluster manager local_list
479    cluster_process_local_link( local_process );
480
481    // register new process descriptor in owner cluster manager copies_list
482    cluster_process_copies_link( local_process );
483
[124]484        hal_fence();
[1]485
[438]486#if DEBUG_PROCESS_COPY_INIT
[433]487cycle = (uint32_t)hal_get_cycles();
[610]488if( DEBUG_PROCESS_COPY_INIT < cycle )
489printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
490__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]491#endif
[279]492
[1]493    return 0;
494
[204]495} // end process_copy_init()
496
////////////////////////////////////////////////////////////////////////////////////////
// Destroy the <process> descriptor copy in the local cluster: release the VMM,
// unlink the descriptor from the cluster manager local_list and copies_list,
// and - only when the local cluster is the owner cluster - detach the process
// from its TXT terminal, remove it from the parent children list, and release
// the PID. The descriptor page is finally returned to the kernel allocator.
// The target process must not contain any thread (checked by assert below).
////////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;          // extended pointer on parent process
    process_t * parent_ptr;         // local pointer on parent process
    cxy_t       parent_cxy;         // parent process cluster
    xptr_t      children_lock_xp;   // extended pointer on parent children lock
    xptr_t      children_nr_xp;     // extended pointer on parent children counter

    // save pid before the descriptor is released
    pid_t       pid = process->pid;

// check no more threads
assert( __FUNCTION__, (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        // (lock taken to atomically unlink and decrement the counter)
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
610
[583]611///////////////////////////////////////////////////////////////////
[527]612const char * process_action_str( process_sigactions_t action_type )
[409]613{
[583]614    switch ( action_type )
615    {
616        case BLOCK_ALL_THREADS:   return "BLOCK";
617        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
618        case DELETE_ALL_THREADS:  return "DELETE";
619        default:                  return "undefined";
620    }
[409]621}
622
////////////////////////////////////////////////////////////////////////////////////////
// Apply the <type> action (BLOCK / UNBLOCK / DELETE all threads) to all copies
// of the process identified by <pid>, in all clusters.
// The calling (client) thread sends one parallel non-blocking RPC per remote
// cluster containing a process copy, blocks and deschedules until the last RPC
// server thread unblocks it, then handles the local copy (if any) directly.
// The RPC descriptor is allocated in the client stack and shared by all
// servers, which is safe because the RPC has no output arguments.
// The calling thread must be able to yield (checked by thread_assert_can_yield).
////////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( __FUNCTION__, ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs to prevent a scheduling point between
    // thread_block() and the end of the scan loop
    hal_disable_irq( &save_sr);

    // client thread blocks itself (before sending the RPCs, so an early
    // server response cannot be lost)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            // local copy is handled directly after the loop, not via RPC
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
776
[433]777/////////////////////////////////////////////////
[583]778void process_block_threads( process_t * process )
[1]779{
[409]780    thread_t          * target;         // pointer on target thread
[433]781    thread_t          * this;           // pointer on calling thread
[564]782    uint32_t            ltid;           // index in process th_tbl[]
[409]783    uint32_t            count;          // requests counter
[593]784    volatile uint32_t   ack_count;      // acknowledges counter
[1]785
[416]786    // get calling thread pointer
[433]787    this = CURRENT_THREAD;
[407]788
[438]789#if DEBUG_PROCESS_SIGACTION
[564]790pid_t pid = process->pid;
[433]791uint32_t cycle = (uint32_t)hal_get_cycles();
[438]792if( DEBUG_PROCESS_SIGACTION < cycle )
[593]793printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]794__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]795#endif
[409]796
[564]797// check target process is an user process
[669]798assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0 ),
[619]799"process %x is not an user process\n", process->pid );
[564]800
[409]801    // get lock protecting process th_tbl[]
[564]802    rwlock_rd_acquire( &process->th_lock );
[1]803
[440]804    // loop on target process local threads
[409]805    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[593]806    // - if the calling thread and the target thread are not running on the same
807    //   core, we ask the target scheduler to acknowlege the blocking
808    //   to be sure that the target thread is not running.
809    // - if the calling thread and the target thread are running on the same core,
810    //   we don't need confirmation from scheduler.
811           
[436]812    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]813    {
[409]814        target = process->th_tbl[ltid];
[1]815
[436]816        if( target != NULL )                                 // thread exist
[1]817        {
818            count++;
[409]819
[583]820            // set the global blocked bit in target thread descriptor.
821            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]822 
[583]823            if( this->core->lid != target->core->lid )
824            {
825                // increment responses counter
826                hal_atomic_add( (void*)&ack_count , 1 );
[409]827
[583]828                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
829                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]830
[583]831                // force scheduling on target thread
832                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]833            }
[1]834        }
[172]835    }
836
[428]837    // release lock protecting process th_tbl[]
[564]838    rwlock_rd_release( &process->th_lock );
[416]839
[593]840    // wait other threads acknowledges  TODO this could be improved...
[409]841    while( 1 )
842    {
[610]843        // exit when all scheduler acknowledges received
[436]844        if ( ack_count == 0 ) break;
[409]845   
846        // wait 1000 cycles before retry
847        hal_fixed_delay( 1000 );
848    }
[1]849
[438]850#if DEBUG_PROCESS_SIGACTION
[433]851cycle = (uint32_t)hal_get_cycles();
[438]852if( DEBUG_PROCESS_SIGACTION < cycle )
[593]853printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
854__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]855#endif
[409]856
[428]857}  // end process_block_threads()
[409]858
[440]859/////////////////////////////////////////////////
860void process_delete_threads( process_t * process,
861                             xptr_t      client_xp )
[409]862{
[440]863    thread_t          * target;        // local pointer on target thread
864    xptr_t              target_xp;     // extended pointer on target thread
865    cxy_t               owner_cxy;     // owner process cluster
[409]866    uint32_t            ltid;          // index in process th_tbl
[440]867    uint32_t            count;         // threads counter
[409]868
[433]869    // get calling thread pointer
[409]870
[440]871    // get target process owner cluster
872    owner_cxy = CXY_FROM_PID( process->pid );
873
[438]874#if DEBUG_PROCESS_SIGACTION
[633]875thread_t * this  = CURRENT_THREAD;
876uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]877if( DEBUG_PROCESS_SIGACTION < cycle )
[625]878printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
879__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]880#endif
881
[564]882// check target process is an user process
[669]883assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0),
[619]884"process %x is not an user process\n", process->pid );
[564]885
[409]886    // get lock protecting process th_tbl[]
[583]887    rwlock_wr_acquire( &process->th_lock );
[409]888
[440]889    // loop on target process local threads                       
[416]890    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]891    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]892    {
[409]893        target = process->th_tbl[ltid];
[1]894
[440]895        if( target != NULL )    // valid thread 
[1]896        {
[416]897            count++;
[440]898            target_xp = XPTR( local_cxy , target );
[1]899
[564]900            // main thread and client thread should not be deleted
[440]901            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
902                (client_xp) != target_xp )                           // not client thread
903            {
904                // mark target thread for delete and block it
[669]905                thread_delete_request( target_xp , true );                   // forced
[440]906            }
[409]907        }
908    }
[1]909
[428]910    // release lock protecting process th_tbl[]
[583]911    rwlock_wr_release( &process->th_lock );
[407]912
[438]913#if DEBUG_PROCESS_SIGACTION
[433]914cycle = (uint32_t)hal_get_cycles();
[438]915if( DEBUG_PROCESS_SIGACTION < cycle )
[593]916printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
917__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]918#endif
[407]919
[440]920}  // end process_delete_threads()
[409]921
[440]922///////////////////////////////////////////////////
923void process_unblock_threads( process_t * process )
[409]924{
[440]925    thread_t          * target;        // pointer on target thead
[409]926    uint32_t            ltid;          // index in process th_tbl
[440]927    uint32_t            count;         // requests counter
[409]928
[438]929#if DEBUG_PROCESS_SIGACTION
[633]930thread_t * this  = CURRENT_THREAD;
931pid_t      pid   = process->pid;
932uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]933if( DEBUG_PROCESS_SIGACTION < cycle )
[593]934printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]935__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]936#endif
937
[564]938// check target process is an user process
[669]939assert( __FUNCTION__, ( LPID_FROM_PID( process->pid ) != 0 ),
[619]940"process %x is not an user process\n", process->pid );
[564]941
[416]942    // get lock protecting process th_tbl[]
[564]943    rwlock_rd_acquire( &process->th_lock );
[416]944
[440]945    // loop on process threads to unblock all threads
[416]946    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]947    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]948    {
[416]949        target = process->th_tbl[ltid];
[409]950
[440]951        if( target != NULL )             // thread found
[409]952        {
953            count++;
[440]954
955            // reset the global blocked bit in target thread descriptor.
956            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]957        }
958    }
959
[428]960    // release lock protecting process th_tbl[]
[564]961    rwlock_rd_release( &process->th_lock );
[407]962
[438]963#if DEBUG_PROCESS_SIGACTION
[433]964cycle = (uint32_t)hal_get_cycles();
[438]965if( DEBUG_PROCESS_SIGACTION < cycle )
[593]966printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]967__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]968#endif
[1]969
[440]970}  // end process_unblock_threads()
[407]971
[1]972///////////////////////////////////////////////
973process_t * process_get_local_copy( pid_t pid )
974{
975    error_t        error;
[172]976    process_t    * process_ptr;   // local pointer on process
[23]977    xptr_t         process_xp;    // extended pointer on process
[1]978
979    cluster_t * cluster = LOCAL_CLUSTER;
980
[564]981#if DEBUG_PROCESS_GET_LOCAL_COPY
982thread_t * this = CURRENT_THREAD;
983uint32_t cycle = (uint32_t)hal_get_cycles();
984if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]985printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]986__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]987#endif
988
[1]989    // get lock protecting local list of processes
[564]990    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]991
992    // scan the local list of process descriptors to find the process
[23]993    xptr_t  iter;
994    bool_t  found = false;
995    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]996    {
[23]997        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]998        process_ptr = GET_PTR( process_xp );
[23]999        if( process_ptr->pid == pid )
[1]1000        {
1001            found = true;
1002            break;
1003        }
1004    }
1005
1006    // release lock protecting local list of processes
[564]1007    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1008
[172]1009    // allocate memory for a new local process descriptor
[440]1010    // and initialise it from reference cluster if not found
[1]1011    if( !found )
1012    {
1013        // get extended pointer on reference process descriptor
[23]1014        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1015
[669]1016        assert( __FUNCTION__, (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1017
[1]1018        // allocate memory for local process descriptor
[23]1019        process_ptr = process_alloc();
[443]1020
[23]1021        if( process_ptr == NULL )  return NULL;
[1]1022
1023        // initialize local process descriptor copy
[23]1024        error = process_copy_init( process_ptr , ref_xp );
[443]1025
[1]1026        if( error ) return NULL;
1027    }
1028
[440]1029#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1030cycle = (uint32_t)hal_get_cycles();
[440]1031if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1032printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]1033__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]1034#endif
1035
[23]1036    return process_ptr;
[1]1037
[409]1038}  // end process_get_local_copy()
1039
[436]1040////////////////////////////////////////////
1041pid_t process_get_ppid( xptr_t  process_xp )
1042{
1043    cxy_t       process_cxy;
1044    process_t * process_ptr;
1045    xptr_t      parent_xp;
1046    cxy_t       parent_cxy;
1047    process_t * parent_ptr;
1048
1049    // get process cluster and local pointer
1050    process_cxy = GET_CXY( process_xp );
1051    process_ptr = GET_PTR( process_xp );
1052
1053    // get pointers on parent process
[564]1054    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1055    parent_cxy = GET_CXY( parent_xp );
1056    parent_ptr = GET_PTR( parent_xp );
1057
[564]1058    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1059}
1060
[1]1061//////////////////////////////////////////////////////////////////////////////////////////
1062// File descriptor array related functions
1063//////////////////////////////////////////////////////////////////////////////////////////
1064
1065///////////////////////////////////////////
[662]1066char * process_fd_type_str( uint32_t type )
1067{
1068    switch( type )
1069    {
[669]1070        case FILE_TYPE_REG : return "FILE";
1071        case FILE_TYPE_DIR  : return "DIR";
1072        case FILE_TYPE_FIFO : return "FIFO";
1073        case FILE_TYPE_PIPE : return "PIPE";
1074        case FILE_TYPE_SOCK : return "SOCK";
1075        case FILE_TYPE_DEV  : return "DEV";
1076        case FILE_TYPE_BLK  : return "BLK";
1077        case FILE_TYPE_SYML : return "SYML";
[662]1078       
1079        default              : return "undefined";
1080    }
1081}
1082   
1083///////////////////////////////////////////
[1]1084void process_fd_init( process_t * process )
1085{
1086    uint32_t fd;
1087
[610]1088    // initialize lock
[564]1089    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1090
[610]1091    // initialize number of open files
[662]1092    process->fd_array.max = 0;
[23]1093
[1]1094    // initialize array
[23]1095    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1096    {
1097        process->fd_array.array[fd] = XPTR_NULL;
1098    }
1099}
[635]1100
[610]1101////////////////////////////////////////////////////
1102error_t process_fd_register( xptr_t      process_xp,
[407]1103                             xptr_t      file_xp,
1104                             uint32_t  * fdid )
[1]1105{
1106    bool_t    found;
[23]1107    uint32_t  id;
[662]1108    uint32_t  max;             // current value of max non-free slot index
1109    xptr_t    entry_xp;        // current value of one fd_array entry
1110    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1111    xptr_t    max_xp;          // extended pointer on max field in fd_array
[1]1112
[657]1113    // get target process cluster and local pointer
[610]1114    process_t * process_ptr = GET_PTR( process_xp );
1115    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1116
[662]1117// check target process is owner process
[669]1118assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
[662]1119"process must be owner process\n" );
[610]1120
1121#if DEBUG_PROCESS_FD_REGISTER
1122thread_t * this  = CURRENT_THREAD;
1123uint32_t   cycle = (uint32_t)hal_get_cycles();
1124pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1125if( DEBUG_PROCESS_FD_REGISTER < cycle )
1126printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1127__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1128#endif
1129
[662]1130    // build extended pointers on lock & max
1131    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1132    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
[610]1133
[669]1134    // take lock protecting fd_array
[610]1135        remote_queuelock_acquire( lock_xp );
[23]1136
[1]1137    found   = false;
1138
[662]1139    // get current value of max_fdid
1140    max = hal_remote_l32( max_xp );
1141
[23]1142    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1143    {
[662]1144        // get fd_array entry
1145        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1146       
[669]1147        // take the first empty slot
[662]1148        if ( entry_xp == XPTR_NULL )
[1]1149        {
[662]1150            // update  fd_array
[610]1151            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
[564]1152
[662]1153            // update max when required
1154            if( id > max ) hal_remote_s32( max_xp , id );
1155
1156            // exit loop
[564]1157                        *fdid = id;
[1]1158            found = true;
1159            break;
1160        }
1161    }
1162
[610]1163    // release lock protecting fd_array
1164        remote_queuelock_release( lock_xp );
[1]1165
[610]1166#if DEBUG_PROCESS_FD_REGISTER
1167cycle = (uint32_t)hal_get_cycles();
1168if( DEBUG_PROCESS_FD_REGISTER < cycle )
1169printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1170__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1171#endif
1172
[428]1173    if ( !found ) return -1;
[1]1174    else          return 0;
1175
[610]1176}  // end process_fd_register()
1177
[657]1178/////////////////////////////////////////////
1179void process_fd_remove( xptr_t    process_xp,
1180                        uint32_t  fdid )
1181{
1182    pid_t       pid;           // target process PID
1183    lpid_t      lpid;          // target process LPID
[662]1184    xptr_t      file_xp;       // extended pointer on file descriptor
[657]1185    xptr_t      iter_xp;       // iterator for list of process copies
1186    xptr_t      copy_xp;       // extended pointer on process copy
1187    process_t * copy_ptr;      // local pointer on process copy 
1188    cxy_t       copy_cxy;      // process copy cluster identifier
1189
1190    // get target process cluster and local pointer
1191    process_t * process_ptr = GET_PTR( process_xp );
1192    cxy_t       process_cxy = GET_CXY( process_xp );
1193
[662]1194// check target process is owner process
[669]1195assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
[662]1196"process must be owner process\n" );
1197
[657]1198    // get target process pid and lpid
1199    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1200    lpid = LPID_FROM_PID( pid );
1201
1202#if DEBUG_PROCESS_FD_REMOVE
1203uint32_t    cycle = (uint32_t)hal_get_cycles();
1204thread_t  * this  = CURRENT_THREAD;
1205if( DEBUG_PROCESS_FD_REMOVE < cycle )
1206printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
1207__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1208#endif
1209
[662]1210    // get extended pointer on file descriptor
1211    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1212
[657]1213    // build extended pointers on list_of_copies root and lock (in owner cluster)
1214    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
1215    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
1216 
[662]1217    // build extended pointer on fd_array lock and max
1218    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1219    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
[657]1220
[662]1221    // take lock protecting fd_array
[657]1222        remote_queuelock_acquire( fd_lock_xp );
1223
1224    // take the lock protecting the list of copies
1225    remote_queuelock_acquire( copies_lock_xp );
1226
[662]1227    // get max value
1228    uint32_t max = hal_remote_l32( fd_max_xp );
1229
[657]1230    // loop on list of process copies
1231    XLIST_FOREACH( copies_root_xp , iter_xp )
1232    {
1233        // get pointers on process copy
1234        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
1235        copy_ptr = GET_PTR( copy_xp );
1236        copy_cxy = GET_CXY( copy_xp );
1237
1238        // release the fd_array entry in process copy
1239        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
1240    }
1241
[662]1242    // update max when required
1243    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );
1244
[669]1245    // release the lock protecting fd_array
[657]1246        remote_queuelock_release( fd_lock_xp );
1247
1248    // release the lock protecting the list of copies
1249    remote_queuelock_release( copies_lock_xp );
1250
1251#if DEBUG_PROCESS_FD_REMOVE
1252cycle = (uint32_t)hal_get_cycles();
1253if( DEBUG_PROCESS_FD_REMOVE < cycle )
1254printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
1255__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1256#endif
1257
1258}  // end process_fd_remove()
1259
[662]1260//////////////////////////////////////////////
1261void process_fd_clean_all( xptr_t process_xp )
[1]1262{
[669]1263    uint32_t  fdid;
[662]1264    xptr_t    file_xp;         // one fd_array entry
1265    xptr_t    lock_xp;         // extendad pointer on lock protecting fd_array
1266    uint32_t  max;             // number of registered files
1267
1268    // get process cluster, local pointer and PID
1269    process_t * process_ptr = GET_PTR( process_xp );
1270    cxy_t       process_cxy = GET_CXY( process_xp );
1271
1272// check target process is owner process
[669]1273assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
[662]1274"process must be owner process\n" );
1275
1276#if DEBUG_PROCESS_FD_CLEAN_ALL
1277thread_t * this  = CURRENT_THREAD;
1278uint32_t   cycle = (uint32_t)hal_get_cycles();
1279if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
[669]1280printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1281__FUNCTION__, this->process->pid, this->trdid, cycle );
[662]1282
1283process_fd_display( process_xp );
1284#endif
1285
1286    // build extended pointer on lock protecting the fd_array
1287    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1288
1289    // get max index for fd_array
1290    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1291
1292    // take lock protecting fd_array
1293        remote_queuelock_acquire( lock_xp );
1294
[669]1295    for( fdid = 0 ; fdid <= max ; fdid++ )
[662]1296    {
1297        // get fd_array entry
[669]1298        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ) );
[662]1299       
1300        if ( file_xp != XPTR_NULL )
1301        {
[669]1302            vfs_file_t * file_ptr = GET_PTR( file_xp );
1303            cxy_t        file_cxy = GET_CXY( file_xp );
[662]1304
[669]1305            // get file type
1306            uint32_t file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ));
1307 
1308            if( file_type == FILE_TYPE_REG )
1309            {
1310                vfs_close( file_xp , fdid );
1311            }
1312            if( file_type == FILE_TYPE_SOCK )
1313            {
1314                socket_close( file_xp , fdid );
1315            }
[662]1316        }
1317    }
1318
1319    // release lock protecting fd_array
1320        remote_queuelock_release( lock_xp );
1321
1322#if DEBUG_PROCESS_FD_CLEAN_ALL
1323cycle = (uint32_t)hal_get_cycles();
1324if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1325printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1326__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1327#endif
1328
1329}  // end process_fd_clean_all()
1330
1331//////////////////////////////////////////////////////////////
1332xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1333                                       uint32_t    fdid )
1334{
1335    cxy_t       process_cxy = GET_CXY( process_xp );
1336    process_t * process_ptr = GET_PTR( process_xp );
1337
[669]1338assert( __FUNCTION__, (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
[662]1339"process_xp argument must be the owner process" );
1340
1341    // access owner process fd_array
1342    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1343
1344}  // end process_fd_get_xptr_from_owner()
1345
1346///////////////////////////////////////////////////////////
1347xptr_t process_fd_get_xptr_from_local( process_t * process,
1348                                       uint32_t    fdid )
1349{
[23]1350    xptr_t  file_xp;
[564]1351    xptr_t  lock_xp;
[1]1352
[23]1353    // access local copy of process descriptor
[407]1354    file_xp = process->fd_array.array[fdid];
[1]1355
[23]1356    if( file_xp == XPTR_NULL )
1357    {
[662]1358        // get owner process cluster and local pointer
1359        xptr_t      owner_xp  = process->owner_xp;
1360        cxy_t       owner_cxy = GET_CXY( owner_xp );
1361        process_t * owner_ptr = GET_PTR( owner_xp );
[1]1362
[662]1363        // build extended pointer on lock protecting fd_array
1364        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );
[564]1365
[662]1366        // take lock protecting fd_array
[564]1367            remote_queuelock_acquire( lock_xp );
1368
[669]1369        // access owner process descriptor
[662]1370        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );
[1]1371
[662]1372        if( file_xp != XPTR_NULL ) 
1373        {
1374           // update local fd_array
1375            process->fd_array.array[fdid] = file_xp;
1376        }
1377
1378        // release lock protecting fd_array
[564]1379            remote_queuelock_release( lock_xp );
[23]1380    }
[1]1381
[23]1382    return file_xp;
[1]1383
[662]1384}  // end process_fd_get_xptr_from_local()
[407]1385
[669]1386/////////////////////////////////////////
1387void process_fd_replicate( xptr_t dst_xp,
1388                           xptr_t src_xp )
[1]1389{
[669]1390    uint32_t fdid;      // current file descriptor index
1391    xptr_t   old_xp;    // extended pointer on a file descriptor (stored in SRC fd_array)
1392    xptr_t   new_xp;    // extended pointer on a file descriptor (stored in DST fd_array)
1393    error_t  error;
[1]1394
[669]1395    // get cluster and local pointer for SRC process
1396    cxy_t       src_cxy = GET_CXY( src_xp );
1397    process_t * src_ptr = GET_PTR( src_xp );
[1]1398
[669]1399assert( __FUNCTION__, (src_xp == hal_remote_l64( XPTR( src_cxy , &src_ptr->owner_xp ))),
1400"src_xp process not in owner cluster" );
[1]1401
[669]1402    // get cluster and local pointer for DST fd_array
1403    cxy_t       dst_cxy = GET_CXY( dst_xp );
1404    process_t * dst_ptr = GET_PTR( dst_xp );
1405
1406assert( __FUNCTION__, (dst_xp == hal_remote_l64( XPTR( dst_cxy , &dst_ptr->owner_xp ))),
1407"dst_xp process not in owner cluster" );
1408
1409    // build extende pointers on SRC fd_array lock and max fields
1410    xptr_t  src_lock_xp = XPTR( src_cxy , &src_ptr->fd_array.lock );
1411    xptr_t  src_max_xp  = XPTR( src_cxy , &src_ptr->fd_array.max );
1412
[1]1413    // get the remote lock protecting the src fd_array
[669]1414        remote_queuelock_acquire( src_lock_xp );
1415 
1416    // loop on fd_array entries
1417    for( fdid = 0 ; fdid <= hal_remote_l32( src_max_xp ) ; fdid++ )
[1]1418        {
[669]1419                old_xp = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->fd_array.array[fdid] ) );
[1]1420
[669]1421                if( old_xp != XPTR_NULL )
[1]1422                {
[669]1423            // get the existing file descriptor cluster and local pointer
1424            vfs_file_t * old_ptr = GET_PTR( old_xp );
1425            cxy_t        old_cxy = GET_CXY( old_xp );
[1]1426
[669]1427            // get existing file attributes and local pointer on inode
1428            uint32_t      attr      = hal_remote_l32( XPTR( old_cxy , &old_ptr->attr ) );
1429            vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( old_cxy , &old_ptr->inode ) );
1430
1431            // create a new file descriptor in same cluster as the existing one
1432            error = vfs_file_create( XPTR( old_cxy , inode_ptr ),
1433                                     attr,
1434                                     &new_xp );
1435            if( error )
1436            {
1437                printk("\n[ERROR] in %s : cannot create new file\n", __FUNCTION__ );
1438                return;
1439            }
1440
1441                        // register new_xp in DST fd_array
1442                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->fd_array.array[fdid] ) , new_xp );
[1]1443                }
1444        }
1445
1446    // release lock on source process fd_array
[669]1447        remote_queuelock_release( src_lock_xp );
[1]1448
[669]1449}  // end process_fd_replicate()
[407]1450
[564]1451
1452////////////////////////////////////
1453bool_t process_fd_array_full( void )
1454{
[662]1455    // get extended pointer on owner process
1456    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
[564]1457
[662]1458    // get owner process cluster and local pointer
1459    process_t * owner_ptr = GET_PTR( owner_xp );
1460    cxy_t       owner_cxy = GET_CXY( owner_xp );
[564]1461
[662]1462    // get number of open file descriptors from  fd_array
1463    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
[564]1464
[662]1465        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
[564]1466}
1467
[662]1468////////////////////////////////////////////
1469void process_fd_display( xptr_t process_xp )
1470{
1471    uint32_t      fdid;
1472    xptr_t        file_xp;
1473    vfs_file_t *  file_ptr;
1474    cxy_t         file_cxy;
1475    uint32_t      file_type;
1476    xptr_t        inode_xp;
1477    vfs_inode_t * inode_ptr;
[564]1478
[662]1479    char          name[CONFIG_VFS_MAX_NAME_LENGTH];
1480
1481    // get process cluster and local pointer
1482    process_t * process_ptr = GET_PTR( process_xp );
1483    cxy_t       process_cxy = GET_CXY( process_xp );
1484
1485    // get process PID
1486    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));
1487
1488    // get pointers on owner process descriptor
1489    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
1490    process_t * owner_ptr = GET_PTR( owner_xp );
1491    cxy_t       owner_cxy = GET_CXY( owner_xp );
1492
1493    // get max fdid from owner process descriptor
1494    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1495
1496    printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
1497    pid, process_cxy, max );
1498
1499    for( fdid = 0 ; fdid <= max ; fdid++ )
1500    {
1501        // get pointers on file descriptor
1502        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1503        file_ptr = GET_PTR( file_xp );
1504        file_cxy = GET_CXY( file_xp );
1505
1506        if( file_xp != XPTR_NULL )
1507        {
1508            // get file type
1509            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type )); 
1510
[669]1511            // get file name if inode exist
1512            if( (file_type != FILE_TYPE_PIPE) && (file_type != FILE_TYPE_SOCK) )
[662]1513            {
1514                // get inode pointers
1515                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
1516                inode_xp  = XPTR( file_cxy , inode_ptr );
1517
1518                // get file name
1519                vfs_inode_get_name( inode_xp , name );
1520
[669]1521                // display relevant file descriptor info
1522                printk(" - %d : type %s / ptr %x (%s)\n",
1523                fdid, process_fd_type_str(file_type), file_ptr, name );
[662]1524            }
[669]1525            else    // PIPE or SOCK types
[662]1526            {
1527                // display relevant file decriptor info
[669]1528                printk(" - %d : type %s / ptr %x\n",
1529                fdid , process_fd_type_str(file_type), file_ptr );
[662]1530            }
1531        }
1532        else
1533        {
1534            printk(" - %d : empty slot\n",
1535            fdid );
1536        }
1537    }
1538}   // end process_fd_display()
1539
[1]1540////////////////////////////////////////////////////////////////////////////////////
1541//  Thread related functions
1542////////////////////////////////////////////////////////////////////////////////////
1543
1544/////////////////////////////////////////////////////
1545error_t process_register_thread( process_t * process,
1546                                 thread_t  * thread,
1547                                 trdid_t   * trdid )
1548{
[472]1549    ltid_t         ltid;
1550    bool_t         found = false;
1551 
[564]1552// check arguments
[669]1553assert( __FUNCTION__, (process != NULL) , "process argument is NULL" );
1554assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
[1]1555
[564]1556    // get the lock protecting th_tbl for all threads
1557    // but the idle thread executing kernel_init (cannot yield)
1558    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1559
[583]1560    // scan th_tbl
[564]1561    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1562    {
1563        if( process->th_tbl[ltid] == NULL )
1564        {
1565            found = true;
1566            break;
1567        }
1568    }
1569
1570    if( found )
1571    {
1572        // register thread in th_tbl[]
1573        process->th_tbl[ltid] = thread;
1574        process->th_nr++;
1575
1576        // returns trdid
1577        *trdid = TRDID( local_cxy , ltid );
1578    }
1579
[583]1580    // release the lock protecting th_tbl
[564]1581    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1582
[564]1583    return (found) ? 0 : 0xFFFFFFFF;
[204]1584
1585}  // end process_register_thread()
1586
[625]1587///////////////////////////////////////////////////
1588uint32_t process_remove_thread( thread_t * thread )
[1]1589{
[443]1590    uint32_t count;  // number of threads in local process descriptor
1591
[625]1592// check thread
[669]1593assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
[625]1594
[1]1595    process_t * process = thread->process;
1596
1597    // get thread local index
1598    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1599   
1600    // get the lock protecting th_tbl[]
1601    rwlock_wr_acquire( &process->th_lock );
[428]1602
[583]1603    // get number of threads
[443]1604    count = process->th_nr;
[428]1605
[564]1606// check th_nr value
[669]1607assert( __FUNCTION__, (count > 0) , "process th_nr cannot be 0" );
[443]1608
[1]1609    // remove thread from th_tbl[]
1610    process->th_tbl[ltid] = NULL;
[450]1611    process->th_nr = count-1;
[1]1612
[583]1613    // release lock protecting th_tbl
[564]1614    rwlock_wr_release( &process->th_lock );
[428]1615
[625]1616    return count;
[443]1617
[450]1618}  // end process_remove_thread()
[204]1619
[408]1620/////////////////////////////////////////////////////////
1621error_t process_make_fork( xptr_t      parent_process_xp,
1622                           xptr_t      parent_thread_xp,
1623                           pid_t     * child_pid,
1624                           thread_t ** child_thread )
[1]1625{
[408]1626    process_t * process;         // local pointer on child process descriptor
1627    thread_t  * thread;          // local pointer on child thread descriptor
1628    pid_t       new_pid;         // process identifier for child process
1629    pid_t       parent_pid;      // process identifier for parent process
1630    xptr_t      ref_xp;          // extended pointer on reference process
[428]1631    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1632    error_t     error;
[1]1633
[408]1634    // get cluster and local pointer for parent process
1635    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1636    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1637
[428]1638    // get parent process PID and extended pointer on .elf file
[564]1639    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1640    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1641
[564]1642    // get extended pointer on reference process
1643    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1644
[564]1645// check parent process is the reference process
[669]1646assert( __FUNCTION__, (parent_process_xp == ref_xp ) ,
[624]1647"parent process must be the reference process" );
[407]1648
[438]1649#if DEBUG_PROCESS_MAKE_FORK
[635]1650uint32_t   cycle;
[583]1651thread_t * this  = CURRENT_THREAD;
1652trdid_t    trdid = this->trdid;
1653pid_t      pid   = this->process->pid;
[635]1654#endif
1655
1656#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1657cycle   = (uint32_t)hal_get_cycles();
[438]1658if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1659printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1660__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1661#endif
[172]1662
[408]1663    // allocate a process descriptor
1664    process = process_alloc();
[635]1665
[408]1666    if( process == NULL )
1667    {
1668        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1669        __FUNCTION__, local_cxy ); 
1670        return -1;
1671    }
[1]1672
[408]1673    // allocate a child PID from local cluster
[416]1674    error = cluster_pid_alloc( process , &new_pid );
[428]1675    if( error ) 
[1]1676    {
[408]1677        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1678        __FUNCTION__, local_cxy ); 
1679        process_free( process );
1680        return -1;
[1]1681    }
[408]1682
[469]1683#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1684cycle = (uint32_t)hal_get_cycles();
1685if( DEBUG_PROCESS_MAKE_FORK < cycle )
[625]1686printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
[583]1687__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1688#endif
1689
[408]1690    // initializes child process descriptor from parent process descriptor
[625]1691    error = process_reference_init( process,
1692                                    new_pid,
1693                                    parent_process_xp );
1694    if( error ) 
1695    {
1696        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1697        __FUNCTION__, local_cxy ); 
1698        process_free( process );
1699        return -1;
1700    }
[408]1701
[438]1702#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1703cycle = (uint32_t)hal_get_cycles();
[438]1704if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1705printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1706__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1707#endif
[408]1708
1709    // copy VMM from parent descriptor to child descriptor
1710    error = vmm_fork_copy( process,
1711                           parent_process_xp );
1712    if( error )
[101]1713    {
[408]1714        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1715        __FUNCTION__, local_cxy ); 
1716        process_free( process );
1717        cluster_pid_release( new_pid );
1718        return -1;
[101]1719    }
[172]1720
[438]1721#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1722cycle = (uint32_t)hal_get_cycles();
[438]1723if( DEBUG_PROCESS_MAKE_FORK < cycle )
[669]1724{
1725    printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
1726    __FUNCTION__, pid, trdid, cycle );
1727    hal_vmm_display( XPTR( local_cxy , process ) , true );
1728}
[433]1729#endif
[407]1730
[564]1731    // if parent_process is INIT, or if parent_process is the TXT owner,
1732    // the child_process becomes the owner of its TXT terminal
1733    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1734    {
1735        process_txt_set_ownership( XPTR( local_cxy , process ) );
1736
1737#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1738cycle = (uint32_t)hal_get_cycles();
[626]1739if( DEBUG_PROCESS_MAKE_FORK < cycle )
[635]1740printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
1741__FUNCTION__ , pid, trdid, new_pid, cycle );
[457]1742#endif
1743
1744    }
1745
[428]1746    // update extended pointer on .elf file
1747    process->vfs_bin_xp = vfs_bin_xp;
1748
[408]1749    // create child thread descriptor from parent thread descriptor
1750    error = thread_user_fork( parent_thread_xp,
1751                              process,
1752                              &thread );
1753    if( error )
1754    {
1755        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1756        __FUNCTION__, local_cxy ); 
1757        process_free( process );
1758        cluster_pid_release( new_pid );
1759        return -1;
1760    }
[172]1761
[564]1762// check main thread LTID
[669]1763assert( __FUNCTION__, (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1764"main thread must have LTID == 0" );
[428]1765
[564]1766#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1767cycle = (uint32_t)hal_get_cycles();
[438]1768if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1769printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1770__FUNCTION__, pid, trdid, thread, cycle );
[433]1771#endif
[1]1772
[635]1773    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
[629]1774    // this includes all parent process copies in all clusters
[408]1775    if( parent_process_cxy == local_cxy )   // reference is local
1776    {
1777        vmm_set_cow( parent_process_ptr );
1778    }
1779    else                                    // reference is remote
1780    {
1781        rpc_vmm_set_cow_client( parent_process_cxy,
1782                                parent_process_ptr );
1783    }
[1]1784
[625]1785    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
[433]1786    vmm_set_cow( process );
1787 
[438]1788#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1789cycle = (uint32_t)hal_get_cycles();
[438]1790if( DEBUG_PROCESS_MAKE_FORK < cycle )
[635]1791printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
[583]1792__FUNCTION__, pid, trdid, cycle );
[433]1793#endif
[101]1794
[428]1795    // get extended pointers on parent children_root, children_lock and children_nr
1796    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1797    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1798    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1799
[428]1800    // register process in parent children list
[564]1801    remote_queuelock_acquire( children_lock_xp );
[428]1802        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1803        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1804    remote_queuelock_release( children_lock_xp );
[204]1805
[408]1806    // return success
1807    *child_thread = thread;
1808    *child_pid    = new_pid;
[1]1809
[438]1810#if DEBUG_PROCESS_MAKE_FORK
[433]1811cycle = (uint32_t)hal_get_cycles();
[438]1812if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1813printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1814__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1815#endif
[428]1816
[408]1817    return 0;
1818
[416]1819}   // end process_make_fork()
[408]1820
[669]1821////////////////////////////////////////////////i//////////////////////////////////////
1822// This static function is called by the thread_user_exec() function :
1823// - to register the main() arguments (args) in the <exec_info> structure.
1824// - to register the environment variables (envs) in the <exec_info> structure.
1825// In both cases the input is an array of NULL terminated string pointers in user
1826// space, and the strings can be dispatched anywhere in the user process space.
1827// This array of pointers is defined by the <u_pointers> argument. The empty slots
1828// contain the NULL value, and the N non-empty slots are indexed from 0 to (N-1).
1829// - The max number of envs, and the max number of args are defined by the
1830//   CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
1831// - The numbers of pages to store the (args) and (envs) strings are defined by the
1832//   CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters.
1833///////////////////////////////////////////////////////////////////////////////////////
1834// Implementation note:
1835// It allocates a kernel buffer to store a kernel copy of both the array of pointers,
1836// and the strings. It set the pointers and copies the strings in this kernel buffer.
1837// Finally, it registers the buffer & the actual number of strings in the process
1838// exec_info structure  (defined in the <process.h> file).
1839///////////////////////////////////////////////////////////////////////////////////////
1840// @ is_args     : [in]    true if called for (args) / false if called for (envs).
1841// @ u_pointers  : [in]    array of pointers on the strings (in user space).
1842// @ exec_info   : [out]   pointer on the exec_info structure.
1843// @ return 0 if success / non-zero if too many strings or no memory.
1844///////////////////////////////////////////////////////////////////////////////////////
1845error_t process_exec_get_strings( bool_t         is_args,
1846                                  char        ** u_pointers,
1847                                  exec_info_t  * exec_info )
[408]1848{
[669]1849    uint32_t     index;           // slot index in pointers array
1850    uint32_t     length;          // string length (in bytes)
1851    uint32_t     pointers_bytes;  // number of bytes to store pointers
1852    uint32_t     max_index;       // max size of pointers array
1853    char      ** k_pointers;      // base of kernel array of pointers
1854    char       * k_buf_ptr;       // pointer on first empty slot in strings buffer
1855    uint32_t     k_buf_space;     // number of bytes available in string buffer
1856    kmem_req_t   req;             // kernel memory allocator request
1857    char       * k_buf;           // kernel buffer for both pointers & strings
1858
1859#if DEBUG_PROCESS_EXEC_GET_STRINGS
1860thread_t * this  = CURRENT_THREAD;
1861uint32_t   cycle = (uint32_t)hal_get_cycles();
1862#endif
1863
1864    // Allocate one block of physical memory for both the pointers and the strings
1865    // as defined by the CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE parameters
1866    // - the array of pointers is stored in the first bytes of the kernel buffer
1867    // - the strings themselve are stored in the next bytes of this buffer
1868    // Set the k_pointers, k_buf_ptr, k_buf_space, and max_index
1869
1870    if( is_args )
1871    {
1872        req.type   = KMEM_PPM;
1873        req.order  = bits_log2( CONFIG_VMM_ARGS_SIZE );
1874        req.flags  = AF_KERNEL | AF_ZERO;
1875        k_buf      = kmem_alloc( &req );
1876
1877        pointers_bytes = CONFIG_PROCESS_ARGS_MAX_NR * sizeof(char *);
1878        k_pointers     = (char **)k_buf;
1879        k_buf_ptr      = k_buf + pointers_bytes;
1880        k_buf_space    = (CONFIG_VMM_ARGS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
1881        max_index      = CONFIG_PROCESS_ARGS_MAX_NR;
1882
1883#if DEBUG_PROCESS_EXEC_GET_STRINGS
1884if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1885printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n",
1886__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
1887#endif
1888
1889    }
1890    else
1891    {
1892        req.type   = KMEM_PPM;
1893        req.order  = bits_log2( CONFIG_VMM_ENVS_SIZE );
1894        req.flags  = AF_KERNEL | AF_ZERO;
1895        k_buf      = kmem_alloc( &req );
1896
1897        pointers_bytes = CONFIG_PROCESS_ENVS_MAX_NR * sizeof(char *);
1898        k_pointers     = (char **)k_buf;
1899        k_buf_ptr      = k_buf + pointers_bytes;
1900        k_buf_space    = (CONFIG_VMM_ENVS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
1901        max_index      = CONFIG_PROCESS_ENVS_MAX_NR;
1902
1903#if DEBUG_PROCESS_EXEC_GET_STRINGS
1904if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1905printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n",
1906__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
1907#endif
1908
1909    }
1910
1911    // copy the user array of pointers to kernel buffer
1912    hal_copy_from_uspace( XPTR( local_cxy , k_pointers ),
1913                          u_pointers,
1914                          pointers_bytes );
1915
1916    // WARNING : the pointers copied in the k_pointers[] array are user pointers,
1917    // after the loop below, the k_pointers[] array contains kernel pointers.
1918
1919#if DEBUG_PROCESS_EXEC_GET_STRINGS
1920if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1921printk("\n[%s] thread[%x,%x] copied u_ptr array to k_ptr array\n"
1922"    p0 = %x / p1 = %x / p2 = %x / p3 = %x\n",
1923__FUNCTION__, this->process->pid, this->trdid,
1924k_pointers[0], k_pointers[1], k_pointers[2], k_pointers[3] );
1925#endif
1926
1927    // scan kernel array of pointers to copy strings to kernel buffer
1928    for( index = 0 ; index < max_index ; index++ )
1929    {
1930        // exit loop if (k_pointers[] == NUll)
1931        if( k_pointers[index] == NULL ) break;
1932
1933        // compute string length
1934        length = hal_strlen_from_uspace( k_pointers[index] ) + 1;
1935
1936        // return error if overflow in kernel buffer
1937        if( length > k_buf_space ) return -1;
1938
1939        // copy the string to kernel buffer
1940        hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ),
1941                              k_pointers[index],
1942                              length );
1943
1944#if DEBUG_PROCESS_EXEC_GET_STRINGS
1945if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1946printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n",
1947__FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length );
1948#endif
1949
1950        // replace the user pointer by a kernel pointer in the k_pointer[] array
1951        k_pointers[index] = k_buf_ptr;
1952
1953        // increment loop variables
1954        k_buf_ptr   += length;
1955        k_buf_space -= length;
1956
1957    }  // end loop on index
1958
1959    // update into exec_info structure
1960    if( is_args )
1961    {
1962        exec_info->args_pointers  =  k_pointers;
1963        exec_info->args_nr        =  index;
1964    }
1965    else
1966    {
1967        exec_info->envs_pointers  =  k_pointers;
1968        exec_info->envs_buf_free  =  k_buf_ptr;
1969        exec_info->envs_nr        =  index;
1970    }
1971
1972#if DEBUG_PROCESS_EXEC_GET_STRINGS
1973if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
1974printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n",
1975__FUNCTION__, this->process->pid, this->trdid, index );
1976#endif
1977
1978    return 0;
1979
1980} // end process_exec_get_strings()
1981
1982/////////////////////////////////
1983error_t process_make_exec( void )
1984{
1985    thread_t       * this;                    // local pointer on this thread
[457]1986    process_t      * process;                 // local pointer on this process
1987    pid_t            pid;                     // this process identifier
[669]1988    trdid_t          trdid;                   // this thread identifier
[610]1989    xptr_t           ref_xp;                  // reference process for this process
[441]1990        error_t          error;                   // value returned by called functions
[669]1991    char           * elf_path;                // path to .elf file
[457]1992    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1993    uint32_t         file_id;                 // file index in fd_array
[669]1994    vseg_t         * vseg;                    // local pointer on created vseg(s)
1995    uint32_t         n;                       // index for loops
[446]1996
[669]1997    uint32_t         args_nr;                 // actual number of args (from exec_info)
1998    intptr_t         args_base;               // args vseg base address in user space
1999    uint32_t         args_size;               // args vseg size (bytes)
2000
2001    uint32_t         envs_nr;                 // actual number of envs (from exec_info)
2002    intptr_t         envs_base;               // envs vseg base address in user space
2003    uint32_t         envs_size;               // envs vseg size (bytes)
2004
2005    // get calling thread, process, pid, trdid, and ref_xp
2006    this    = CURRENT_THREAD;
2007    process = this->process;
[457]2008    pid     = process->pid;
[669]2009    trdid   = this->trdid;
[610]2010    ref_xp  = process->ref_xp;
[408]2011
[669]2012        // get .elf pathname from exec_info structure
2013        elf_path      = process->exec_info.path;
[408]2014
[438]2015#if DEBUG_PROCESS_MAKE_EXEC
[433]2016uint32_t cycle = (uint32_t)hal_get_cycles();
[635]2017if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2018printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
2019__FUNCTION__, pid, trdid, elf_path, cycle );
[433]2020#endif
[408]2021
[669]2022    // 1. open the file identified by <path>
[457]2023    file_xp = XPTR_NULL;
[564]2024    file_id = 0xFFFFFFFF;
[610]2025        error   = vfs_open( process->vfs_root_xp,
[669]2026                            elf_path,
[610]2027                        ref_xp,
[457]2028                            O_RDONLY,
2029                            0,
2030                            &file_xp,
2031                            &file_id );
2032        if( error )
2033        {
[669]2034                printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s>\n",
2035        __FUNCTION__, pid, trdid, elf_path );
[457]2036                return -1;
2037        }
2038
[446]2039#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[635]2040if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2041printk("\n[%s] thread[%x,%x] opened file <%s>\n",
2042__FUNCTION__, pid, trdid, elf_path );
[446]2043#endif
2044
[669]2045    // 2. delete all threads other than this main thread in all clusters
[457]2046    process_sigaction( pid , DELETE_ALL_THREADS );
[446]2047
[469]2048#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[635]2049if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2050printk("\n[%s] thread[%x,%x] deleted existing threads\n",
2051__FUNCTION__, pid, trdid );
[469]2052#endif
2053
[669]2054    // 3. reset calling process VMM
[625]2055    vmm_user_reset( process );
[446]2056
[457]2057#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2058if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2059{
2060    printk("\n[%s] thread[%x,%x] completed VMM reset\n",
2061    __FUNCTION__, pid, trdid );
2062    hal_vmm_display( ref_xp , true );
2063}
[457]2064#endif
[408]2065
[669]2066    // 4. register the "args" vseg in VSL and map it in GPT, if required
2067    // this vseg contains both the array of pointers and the strings
2068    args_nr = process->exec_info.args_nr;
2069
2070    if( args_nr > 0 )
[416]2071    {
[669]2072        // get args vseg base and size in user space
2073        args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
2074        args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
2075
2076        // create and register args vseg in VMM
2077        vseg = vmm_create_vseg( process,
2078                                VSEG_TYPE_DATA,
2079                                args_base,
2080                                args_size,
2081                                0,                 // file_offset unused for DATA type
2082                                0,                 // file_size unused for DATA type
2083                                XPTR_NULL,         // mapper_xp unused for DATA type
2084                                0 );               // cxy unused for DATA type
2085        if( vseg == NULL )
2086        {
2087                 printk("\n[ERROR] in %s : thread[%x,%x] cannot get args vseg for <%s>\n",
2088             __FUNCTION__, pid, trdid, elf_path );
2089                     return -1;
2090        }
2091
[438]2092#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2093if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2094{
2095    printk("\n[%s] thread[%x,%x] args vseg registered in new process VSL\n",
2096    __FUNCTION__, pid, trdid );
2097    hal_vmm_display( ref_xp , true );
2098}
[433]2099#endif
[669]2100        // map all pages for this "args" vseg
2101        uint32_t fake_attr;   // required for hal_gpt_lock_pte()
2102        ppn_t    fake_ppn;    // required for hal_gpt_lock_pte()
[428]2103
[669]2104        xptr_t   gpt  = XPTR( local_cxy , &process->vmm.gpt );
2105        uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
2106        vpn_t    vpn  = CONFIG_VMM_UTILS_BASE;
2107        ppn_t    ppn  = ((ppn_t)process->exec_info.args_pointers >> CONFIG_PPM_PAGE_SHIFT);
2108
2109        for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 
2110        {
2111            // lock the PTE
2112            if (hal_gpt_lock_pte( gpt , vpn , &fake_attr , &fake_ppn ) )
2113            {
2114                printk("\n[ERROR] in %s : thread[%x,%x] cannot map args vpn %x for <%s>\n",
2115                __FUNCTION__, pid, trdid, vpn, elf_path );
2116                        return -1;
2117            }
2118
2119            // map and unlock the PTE
2120            hal_gpt_set_pte( gpt , vpn + n , attr , ppn + n );
2121        }
2122
2123#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2124if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2125{
2126    printk("\n[%s] thread[%x,%x] args vseg mapped in new process GPT\n",
2127    __FUNCTION__, pid, trdid );
2128    hal_vmm_display( ref_xp , true );
2129}
2130#endif
2131
2132        // set user space pointers in array of pointers
2133        char  ** ptr    = process->exec_info.args_pointers;
2134
2135        for( n = 0 ; n < args_nr ; n++ )
2136        {
2137            ptr[n] = ptr[n] + args_base - (intptr_t)ptr;
2138        } 
2139    }
2140
2141    // 5. register the "envs" vseg in VSL and map it in GPT, if required
2142    // this vseg contains both the array of pointers and the strings
2143    envs_nr = process->exec_info.envs_nr;
2144
2145    if( envs_nr > 0 )
2146    {
2147        // get envs vseg base and size in user space from config
2148        envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
2149        envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
2150
2151        // TODO (inspired from args)
2152    }
2153
2154
2155    // 6. register code & data vsegs, and entry-point in process VMM,
2156    // register extended pointer on .elf file in process descriptor
[457]2157        error = elf_load_process( file_xp , process );
[441]2158    if( error )
[1]2159        {
[669]2160                printk("\n[ERROR] in %s : thread[%x,%x] failed to access <%s>\n",
2161        __FUNCTION__, pid, trdid, elf_path );
[408]2162        return -1;
[1]2163        }
2164
[438]2165#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[635]2166if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[669]2167{
2168    printk("\n[%s] thread[%x,%x] registered code/data vsegs / entry %x\n",
2169    __FUNCTION__, pid, trdid, process->vmm.entry_point );
2170    hal_vmm_display( ref_xp , true );
2171}
[433]2172#endif
[1]2173
[669]2174    // 7. allocate an user stack vseg for main thread
2175    vseg = vmm_create_vseg( process,
2176                            VSEG_TYPE_STACK,
2177                            LTID_FROM_TRDID( trdid ),
2178                            0,                 // length unused
2179                            0,                 // file_offset unused
2180                            0,                 // file_size unused
2181                            XPTR_NULL,         // mapper_xp unused
2182                            local_cxy );
2183    if( vseg == NULL )
2184    {
2185            printk("\n[ERROR] in %s : thread[%x,%x] cannot set u_stack vseg for <%s>\n",
2186        __FUNCTION__, pid, trdid, elf_path );
2187                return -1;
2188    }
2189
2190#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2191if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2192{
2193    printk("\n[%s] thread[%x,%x] registered stack vseg\n",
2194    __FUNCTION__, pid, trdid );
2195    hal_vmm_display( ref_xp , true );
2196}
2197#endif
2198
2199    // update user stack in thread descriptor
2200    this->user_stack_vseg = vseg;
2201
2202    // 8. update the main thread descriptor ... and jumps (one way) to user code
2203    thread_user_exec( args_nr , args_base );
2204
[457]2205    if( error )
2206    {
[669]2207        printk("\n[ERROR] in %s : thread[%x,%x] cannot update thread for <%s>\n",
2208        __FUNCTION__ , pid, trdid, elf_path );
[408]2209        return -1;
[457]2210    }
[1]2211
[409]2212        return 0;
2213
2214}  // end process_make_exec()
2215
[457]2216
[623]2217////////////////////////////////////////////////
2218void process_zero_create( process_t   * process,
2219                          boot_info_t * info )
[428]2220{
[580]2221    error_t error;
2222    pid_t   pid;
[428]2223
[438]2224#if DEBUG_PROCESS_ZERO_CREATE
[433]2225uint32_t cycle = (uint32_t)hal_get_cycles();
[438]2226if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]2227printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]2228__FUNCTION__, local_cxy, cycle );
[433]2229#endif
[428]2230
[624]2231    // get pointer on VMM
2232    vmm_t * vmm = &process->vmm;
2233
[580]2234    // get PID from local cluster manager for this kernel process
2235    error = cluster_pid_alloc( process , &pid );
2236
2237    if( error || (LPID_FROM_PID( pid ) != 0) )
2238    {
2239        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
2240        __FUNCTION__ , local_cxy, pid );
2241        hal_core_sleep();
2242    }
2243
[635]2244#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2245if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2246printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
2247#endif
2248
[428]2249    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]2250    // the kernel process_zero is its own parent_process,
2251    // reference_process, and owner_process, and cannot be killed...
2252    process->pid        = pid;
[433]2253    process->ref_xp     = XPTR( local_cxy , process );
[443]2254    process->owner_xp   = XPTR( local_cxy , process );
[580]2255    process->parent_xp  = XPTR( local_cxy , process );
[433]2256    process->term_state = 0;
[428]2257
[635]2258    // initialize VSL as empty
[624]2259    vmm->vsegs_nr = 0;
2260        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[623]2261
[635]2262#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2263if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2264printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
2265#endif
2266
2267    // initialize GPT as empty
[624]2268    error = hal_gpt_create( &vmm->gpt );
[635]2269
[624]2270    if( error ) 
2271    {
2272        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
2273        hal_core_sleep();
2274    }
2275
[635]2276#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2277if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2278printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
2279#endif
2280
[625]2281    // initialize VSL and GPT locks
[629]2282    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[624]2283   
2284    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2285    error = hal_vmm_kernel_init( info );
[635]2286
[624]2287    if( error ) 
2288    {
2289        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2290        hal_core_sleep();
2291    }
2292
[635]2293#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2294if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2295printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
2296#endif
2297
[564]2298    // reset th_tbl[] array and associated fields
[428]2299    uint32_t i;
[564]2300    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]2301        {
2302        process->th_tbl[i] = NULL;
2303    }
2304    process->th_nr  = 0;
[564]2305    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]2306
[635]2307#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2308if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2309printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy );
2310#endif
[564]2311
[428]2312    // reset children list as empty
2313    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2314    process->children_nr = 0;
[564]2315    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2316                           LOCK_PROCESS_CHILDREN );
[428]2317
[635]2318#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2319if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2320printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy );
2321#endif
2322
[580]2323    // register kernel process in cluster manager local_list
2324    cluster_process_local_link( process );
2325   
[428]2326        hal_fence();
2327
[438]2328#if DEBUG_PROCESS_ZERO_CREATE
[433]2329cycle = (uint32_t)hal_get_cycles();
[438]2330if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]2331printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]2332__FUNCTION__, local_cxy, cycle );
[433]2333#endif
[428]2334
[610]2335}  // end process_zero_create()
[428]2336
////////////////////////////////
// Create and start the user "init" process (PID 1) in the local cluster.
// Called once at boot time, after the kernel process_zero exists:
// - allocates and initializes a process descriptor (parent is process_zero),
// - opens the .elf file defined by CONFIG_PROCESS_INIT_PATH and loads the
//   "code" and "data" vsegs into the process VMM,
// - registers the new process as a child of process_zero,
// - creates the detached main thread on a selected local core and unblocks it.
// Any failure in this boot sequence is fatal: the core prints a PANIC
// message and goes to sleep.
void process_init_create( void )
{
    process_t      * process;       // local pointer on process descriptor
    pid_t            pid;           // process_init identifier
    thread_t       * thread;        // local pointer on main thread
    pthread_attr_t   attr;          // main thread attributes
    lid_t            lid;           // selected core local index for main thread
    xptr_t           file_xp;       // extended pointer on .elf file descriptor
    uint32_t         file_id;       // file index in fd_array
    error_t          error;

#if DEBUG_PROCESS_INIT_CREATE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    // allocates memory for process descriptor from local cluster
	process = process_alloc(); 
    if( process == NULL )
    {
        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // set the CWD and VFS_ROOT fields in process descriptor
    // (inherited from the kernel process_zero)
    process->cwd_xp      = process_zero.vfs_root_xp;
    process->vfs_root_xp = process_zero.vfs_root_xp;

    // get PID from local cluster
    error = cluster_pid_alloc( process , &pid );
    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
        hal_core_sleep();
    }
    // init must be the first user process created in this cluster
    if( pid != 1 ) 
    {
        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // initialize process descriptor / parent is local process_zero
    error = process_reference_init( process,
                                    pid,
                                    XPTR( local_cxy , &process_zero ) ); 
    if( error )
    {
        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // open the file identified by CONFIG_PROCESS_INIT_PATH
    file_xp = XPTR_NULL;
    file_id = -1;     // invalid index until vfs_open() succeeds
	error   = vfs_open( process->vfs_root_xp,
                        CONFIG_PROCESS_INIT_PATH,
                        XPTR( local_cxy , process ),
                        O_RDONLY,
                        0,
                        &file_xp,
                        &file_id );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot open file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // register "code" and "data" vsegs as well as entry-point
    // in process VMM, using information contained in the elf file.
	error = elf_load_process( file_xp , process );

    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot access file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }


#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
{
    printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
    __FUNCTION__, this->process->pid, this->trdid );
    hal_vmm_display( XPTR( local_cxy , process ) , true );
}
#endif

    // get extended pointers on process_zero children_root, children_lock
    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );

    // take lock protecting kernel process children list
    remote_queuelock_acquire( children_lock_xp );

    // register process INIT in parent local process_zero
	xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
	hal_atomic_add( &process_zero.children_nr , 1 );

    // release lock protecting kernel process children list
    remote_queuelock_release( children_lock_xp );

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] registered init process in parent\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // select a core in local cluster to execute the main thread
    lid  = cluster_select_local_core( local_cxy );

    // initialize pthread attributes for main thread
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor
	error = thread_user_create( pid,
                                (void *)process->vmm.entry_point,
                                NULL,
                                &attr,
                                &thread );

    if( error )
    {
        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
        hal_core_sleep();
    }
    // the main thread must have the first thread identifier
    if( thread->trdid != 0 )
    {
        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] created main thread\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // activate thread : it becomes schedulable on the selected core
	thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    hal_fence();

#if DEBUG_PROCESS_INIT_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

}  // end process_init_create()
2507
/////////////////////////////////////////
// Display one line describing the process identified by <process_xp>
// (PID, attached TXT terminal, FG/BG status, PPID, termination state,
// number of threads, .elf file name).
// It uses nolock_printk(), so the caller is expected to hold the TXT0
// lock (see process_txt_display(), which acquires it before calling).
// NOTE(review): <process_xp> is read as the owner process copy
// (fd_array / owner_xp are accessed directly) — confirm callers always
// pass the owner cluster copy.
void process_display( xptr_t process_xp )
{
    process_t   * process_ptr;
    cxy_t         process_cxy;

    xptr_t        parent_xp;       // extended pointer on parent process
    process_t   * parent_ptr;
    cxy_t         parent_cxy;

    xptr_t        owner_xp;        // extended pointer on owner process
    process_t   * owner_ptr;
    cxy_t         owner_cxy;

    pid_t         pid;
    pid_t         ppid;
    lpid_t        lpid;
    uint32_t      state;
    uint32_t      th_nr;

    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
    chdev_t     * txt_chdev_ptr;
    cxy_t         txt_chdev_cxy;
    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process

    xptr_t        elf_file_xp;     // extended pointer on .elf file
    cxy_t         elf_file_cxy;
    vfs_file_t  * elf_file_ptr;
    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode

    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];

    // get cluster and local pointer on process
    process_ptr = GET_PTR( process_xp );
    process_cxy = GET_CXY( process_xp );

    // get process PID, LPID, and state
    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    lpid  = LPID_FROM_PID( pid );
    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );

    // get process PPID
    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );
    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

    // get number of threads
    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );

    // get pointers on owner process descriptor
    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
    owner_cxy = GET_CXY( owner_xp );
    owner_ptr = GET_PTR( owner_xp );

    // get process TXT name and .elf name
    // (lpid == 0 identifies the kernel process_zero, which has
    //  no stdin file nor .elf binary registered)
    if( lpid )                                   // user process
    {

        // get extended pointer on file descriptor associated to TXT_RX
        // (fd_array[0] is the stdin pseudo-file in the owner process)
        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );

        assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
        "process must be attached to one TXT terminal" ); 

        // get TXT_RX chdev pointers
        txt_chdev_xp  = chdev_from_file( txt_file_xp );
        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
        txt_chdev_ptr = GET_PTR( txt_chdev_xp );

        // get TXT_RX name and ownership
        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
   
        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );

        // get process .elf name from the inode of the binary file
        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
        elf_file_cxy  = GET_CXY( elf_file_xp );
        elf_file_ptr  = GET_PTR( elf_file_xp );
        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
    }
    else                                         // kernel process_zero
    {
        // TXT name and .elf name are not registered in kernel process_zero
        strcpy( txt_name , "txt0_rx" );
        txt_owner_xp = process_xp; 
        strcpy( elf_name , "kernel.elf" );
    }

    // display process info
    // (FG) when the process owns its TXT terminal, (BG) otherwise
    if( txt_owner_xp == process_xp )
    {
        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }
    else
    {
        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }
}  // end process_display()
2614
2615
2616////////////////////////////////////////////////////////////////////////////////////////
2617//     Terminals related functions
2618////////////////////////////////////////////////////////////////////////////////////////
2619
[581]2620//////////////////////////////////
[485]2621uint32_t process_txt_alloc( void )
[428]2622{
2623    uint32_t  index;       // TXT terminal index
2624    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2625    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2626    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2627    xptr_t    root_xp;     // extended pointer on owner field in chdev
2628
2629    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2630    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2631    {
2632        // get pointers on TXT_RX[index]
2633        chdev_xp  = chdev_dir.txt_rx[index];
2634        chdev_cxy = GET_CXY( chdev_xp );
2635        chdev_ptr = GET_PTR( chdev_xp );
2636
2637        // get extended pointer on root of attached process
2638        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2639
2640        // return free TXT index if found
2641        if( xlist_is_empty( root_xp ) ) return index; 
2642    }
2643
[669]2644    assert( __FUNCTION__, false , "no free TXT terminal found" );
[428]2645
2646    return -1;
2647
2648} // end process_txt_alloc()
2649
2650/////////////////////////////////////////////
[669]2651void process_txt_attach( xptr_t   process_xp,
2652                         uint32_t txt_id )
[428]2653{
2654    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2655    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2656    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2657    xptr_t      root_xp;      // extended pointer on list root in chdev
2658    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2659
[669]2660    process_t * process_ptr = GET_PTR(process_xp );
2661    cxy_t       process_cxy = GET_CXY(process_xp );
2662
[564]2663// check process is in owner cluster
[669]2664assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
[564]2665"process descriptor not in owner cluster" );
[428]2666
[564]2667// check terminal index
[669]2668assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[564]2669"illegal TXT terminal index" );
[428]2670
2671    // get pointers on TXT_RX[txt_id] chdev
2672    chdev_xp  = chdev_dir.txt_rx[txt_id];
2673    chdev_cxy = GET_CXY( chdev_xp );
2674    chdev_ptr = GET_PTR( chdev_xp );
2675
2676    // get extended pointer on root & lock of attached process list
2677    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2678    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2679
[564]2680    // get lock protecting list of processes attached to TXT
2681    remote_busylock_acquire( lock_xp );
2682
[669]2683    // insert owner process in list of attached processes to same TXT
2684    xlist_add_last( root_xp , XPTR( process_cxy , &process_ptr->txt_list ) );
[428]2685
[564]2686    // release lock protecting list of processes attached to TXT
2687    remote_busylock_release( lock_xp );
2688
[446]2689#if DEBUG_PROCESS_TXT
[610]2690thread_t * this = CURRENT_THREAD;
[457]2691uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2692if( DEBUG_PROCESS_TXT < cycle )
[610]2693printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
[669]2694__FUNCTION__, this->process->pid, this->trdid,
2695hal_remote_l32( XPTR( process_cxy , &process_ptr->pid, txt_id , cycle );
[433]2696#endif
[428]2697
2698} // end process_txt_attach()
2699
/////////////////////////////////////////////
// Detach the owner process identified by <process_xp> from its TXT_RX
// terminal : first gives away the TXT ownership if the process holds it,
// then removes the process from the terminal's attached process list
// (protected by the chdev busylock).
// The process descriptor must be the owner cluster copy.
void process_txt_detach( xptr_t  process_xp )
{
    process_t * process_ptr;  // local pointer on process in owner cluster
    cxy_t       process_cxy;  // process owner cluster
    pid_t       process_pid;  // process identifier
    xptr_t      file_xp;      // extended pointer on stdin file
    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
    xptr_t      lock_xp;      // extended pointer on list lock in chdev

    // get process cluster, local pointer, and PID
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check process descriptor in owner cluster
assert( __FUNCTION__, (CXY_FROM_PID( process_pid ) == process_cxy ) ,
"process descriptor not in owner cluster" );

    // release TXT ownership (does nothing if not TXT owner)
    // must be done before unlinking, so another attached process
    // can be selected as the new owner
    process_txt_transfer_ownership( process_xp );

    // get extended pointer on process stdin pseudo file (fd_array[0])
    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT_RX chdev
    chdev_xp  = chdev_from_file( file_xp );
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

    // get extended pointer on lock protecting attached process list
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // get lock protecting list of processes attached to TXT
    remote_busylock_acquire( lock_xp );

    // unlink process from attached process list
    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );

    // release lock protecting list of processes attached to TXT
    remote_busylock_release( lock_xp );

#if DEBUG_PROCESS_TXT
thread_t * this = CURRENT_THREAD;
uint32_t cycle  = (uint32_t)hal_get_cycles();
uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
#endif

} // end process_txt_detach()
2754
2755///////////////////////////////////////////////////
[669]2756uint32_t process_txt_get_index( xptr_t process_xp )
2757{
2758
2759    // get target process cluster and local pointer
2760    process_t * process_ptr = GET_PTR( process_xp );
2761    cxy_t       process_cxy = GET_CXY( process_xp );
2762
2763assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp))),
2764"process descriptor not in owner cluster" );
2765
2766    // get extended pointer on STDIN pseudo file in owner process descriptor
2767    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0]));
2768
2769assert( __FUNCTION__, (file_xp != XPTR_NULL),
2770"STDIN pseudo-file undefined in fd_array for process %x\n",
2771hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2772
2773    // get extended pointer on TXT chdev
2774    xptr_t chdev_xp = chdev_from_file( file_xp );
2775 
2776assert( __FUNCTION__, (chdev_xp != XPTR_NULL),
2777"chdev undefined for STDIN pseudo-file of process %x\n",
2778hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2779
2780    // get cluster and local pointer on chdev
2781   cxy_t     chdev_cxy = GET_CXY( chdev_xp );
2782   chdev_t * chdev_ptr = GET_PTR( chdev_xp );
2783 
2784   // get parent TXT terminal index
2785   return hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2786
2787}  // end process_txt_get_index()
2788
2789///////////////////////////////////////////////////
[428]2790void process_txt_set_ownership( xptr_t process_xp )
2791{
2792    process_t * process_ptr;
2793    cxy_t       process_cxy;
2794    xptr_t      file_xp;
2795    xptr_t      txt_xp;     
2796    chdev_t   * txt_ptr;
2797    cxy_t       txt_cxy;
2798
[436]2799    // get pointers on process in owner cluster
[428]2800    process_cxy = GET_CXY( process_xp );
[435]2801    process_ptr = GET_PTR( process_xp );
[436]2802
2803    // check owner cluster
[669]2804    assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
[624]2805    "process descriptor not in owner cluster" );
[436]2806
[428]2807    // get extended pointer on stdin pseudo file
[564]2808    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2809
2810    // get pointers on TXT chdev
2811    txt_xp  = chdev_from_file( file_xp );
2812    txt_cxy = GET_CXY( txt_xp );
[435]2813    txt_ptr = GET_PTR( txt_xp );
[428]2814
2815    // set owner field in TXT chdev
[564]2816    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2817
[446]2818#if DEBUG_PROCESS_TXT
[610]2819thread_t * this = CURRENT_THREAD;
[457]2820uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2821uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2822if( DEBUG_PROCESS_TXT < cycle )
[669]2823printk("\n[%s] thread[%x,%x] give TXT%d ownership to process / cycle %d\n",
2824__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2825#endif
2826
[428]2827}  // end process_txt_set ownership()
2828
////////////////////////////////////////////////////////
// Transfer the TXT terminal ownership away from the process identified
// by <process_xp>, if it is the current owner. Policy :
// - does nothing if the process is not the owner, or on TXT0 (kernel),
// - if the process is not the KSH (ppid != 1), ownership goes to the
//   KSH process attached to the same terminal (must exist),
// - if the process is the KSH, ownership goes to any other attached
//   process, or to XPTR_NULL if none remains.
// The process descriptor must be the owner cluster copy.
void process_txt_transfer_ownership( xptr_t process_xp )
{
    process_t * process_ptr;     // local pointer on process releasing ownership
    cxy_t       process_cxy;     // process cluster
    pid_t       process_pid;     // process identifier
    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
    uint32_t    txt_id;          // TXT_RX channel
    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    xptr_t      root_xp;         // extended pointer on root of attached process list
    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;         // iterator for xlist
    xptr_t      current_xp;      // extended pointer on current process
    bool_t      found;           // true when a new owner has been selected

#if DEBUG_PROCESS_TXT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle;
#endif

    // get pointers on target process
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check owner cluster
assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
"process descriptor not in owner cluster" );

    // get extended pointer on stdin pseudo file (fd_array[0])
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get relevant infos from chdev descriptor
    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );

    // transfer ownership only if target process is the TXT owner
    // (txt_id > 0 excludes the kernel terminal TXT0)
    if( (owner_xp == process_xp) && (txt_id > 0) ) 
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find KSH process
            // (a process whose parent is the init process, i.e. ppid == 1)
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

// It must exist a KSH process for each user TXT channel
assert( __FUNCTION__, (found == true), "KSH process not found for TXT%d", txt_id );

        }
        else                                           // target process is KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find another process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( current_xp != process_xp )            // current is not KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle  = (uint32_t)hal_get_cycles();
cxy_t       current_cxy = GET_CXY( current_xp );
process_t * current_ptr = GET_PTR( current_xp );
uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // no more owner for TXT if no other process found
            if( found == false )
            {
                // set owner field in TXT chdev
                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
            }
        }
    }
    else
    {

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
#endif

    }

}  // end process_txt_transfer_ownership()
[428]2977
2978
[564]2979////////////////////////////////////////////////
2980bool_t process_txt_is_owner( xptr_t process_xp )
[457]2981{
2982    // get local pointer and cluster of process in owner cluster
2983    cxy_t       process_cxy = GET_CXY( process_xp );
2984    process_t * process_ptr = GET_PTR( process_xp );
2985
[564]2986// check calling thread execute in target process owner cluster
2987pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[669]2988assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2989"process descriptor not in owner cluster" );
[457]2990
2991    // get extended pointer on stdin pseudo file
[564]2992    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2993
2994    // get pointers on TXT chdev
2995    xptr_t    txt_xp  = chdev_from_file( file_xp );
2996    cxy_t     txt_cxy = GET_CXY( txt_xp );
2997    chdev_t * txt_ptr = GET_PTR( txt_xp );
2998
2999    // get extended pointer on TXT_RX owner process
[564]3000    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]3001
3002    return (process_xp == owner_xp);
3003
3004}   // end process_txt_is_owner()
3005
[436]3006////////////////////////////////////////////////     
3007xptr_t process_txt_get_owner( uint32_t channel )
[435]3008{
3009    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
3010    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
3011    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
3012
[564]3013    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]3014
[457]3015}  // end process_txt_get_owner()
3016
///////////////////////////////////////////
// Display on TXT0 the list of all processes attached to the TXT
// terminal identified by <txt_id>.
// Lock ordering : the attached process list lock is taken first,
// then the TXT0 lock (both busy-waiting); they are released in
// reverse order. process_display() relies on the TXT0 lock being
// held, as it uses nolock_printk().
void process_txt_display( uint32_t txt_id )
{
    xptr_t      chdev_xp;
    cxy_t       chdev_cxy;
    chdev_t   * chdev_ptr;
    xptr_t      root_xp;
    xptr_t      lock_xp;
    xptr_t      current_xp;
    xptr_t      iter_xp;
    cxy_t       txt0_cxy;
    chdev_t   * txt0_ptr;
    xptr_t      txt0_xp;
    xptr_t      txt0_lock_xp;
   
    assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
    "illegal TXT terminal index" );

    // get pointers on TXT0 chdev (kernel terminal used for display)
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get pointers on TXT_RX[txt_id] chdev
    chdev_xp  = chdev_dir.txt_rx[txt_id];
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root & lock of attached process list
    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // get lock on attached process list
    remote_busylock_acquire( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_busylock_acquire( txt0_lock_xp );

    // display header
    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
    txt_id , (uint32_t)hal_get_cycles() );

    // scan attached process list, one display line per process
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
        process_display( current_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_busylock_release( txt0_lock_xp );

    // release lock on attached process list
    remote_busylock_release( lock_xp );

}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.