source: trunk/kernel/kern/process.c @ 611

Last change on this file since 611 was 611, checked in by alain, 5 years ago

Introduce significant modifications in VFS to support the <ls> command,
and the . and .. directory entries.

File size: 77.5 KB
Rev  Line
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[433]6 *          Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
[428]41#include <chdev.h>
[1]42#include <list.h>
[407]43#include <string.h>
[1]44#include <scheduler.h>
[564]45#include <busylock.h>
46#include <queuelock.h>
47#include <remote_queuelock.h>
48#include <rwlock.h>
49#include <remote_rwlock.h>
[1]50#include <dqdt.h>
51#include <cluster.h>
52#include <ppm.h>
53#include <boot_info.h>
54#include <process.h>
55#include <elf.h>
[23]56#include <syscalls.h>
[435]57#include <shared_syscalls.h>
[1]58
59//////////////////////////////////////////////////////////////////////////////////////////
60// Extern global variables
61//////////////////////////////////////////////////////////////////////////////////////////
62
[428]63extern process_t           process_zero;     // allocated in kernel_init.c
64extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]65
66//////////////////////////////////////////////////////////////////////////////////////////
67// Process initialisation related functions
68//////////////////////////////////////////////////////////////////////////////////////////
69
[583]70/////////////////////////////////
[503]71process_t * process_alloc( void )
[1]72{
73        kmem_req_t   req;
74
75    req.type  = KMEM_PROCESS;
76        req.size  = sizeof(process_t);
77        req.flags = AF_KERNEL;
78
79    return (process_t *)kmem_alloc( &req );
80}
81
82////////////////////////////////////////
83void process_free( process_t * process )
84{
85    kmem_req_t  req;
86
87        req.type = KMEM_PROCESS;
88        req.ptr  = process;
89        kmem_free( &req );
90}
91
[101]92/////////////////////////////////////////////////
93void process_reference_init( process_t * process,
94                             pid_t       pid,
[457]95                             xptr_t      parent_xp )
[1]96{
[610]97    xptr_t      process_xp;
[428]98    cxy_t       parent_cxy;
99    process_t * parent_ptr;
[407]100    xptr_t      stdin_xp;
101    xptr_t      stdout_xp;
102    xptr_t      stderr_xp;
103    uint32_t    stdin_id;
104    uint32_t    stdout_id;
105    uint32_t    stderr_id;
[415]106    error_t     error;
[428]107    uint32_t    txt_id;
108    char        rx_path[40];
109    char        tx_path[40];
[440]110    xptr_t      file_xp;
[428]111    xptr_t      chdev_xp;
112    chdev_t *   chdev_ptr;
113    cxy_t       chdev_cxy;
114    pid_t       parent_pid;
[1]115
[610]116    // build extended pointer on this reference process
117    process_xp = XPTR( local_cxy , process );
118
[428]119    // get parent process cluster and local pointer
120    parent_cxy = GET_CXY( parent_xp );
[435]121    parent_ptr = GET_PTR( parent_xp );
[204]122
[457]123    // get parent_pid
[564]124    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]125
[438]126#if DEBUG_PROCESS_REFERENCE_INIT
[610]127thread_t * this = CURRENT_THREAD;
[433]128uint32_t cycle = (uint32_t)hal_get_cycles();
[610]129if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
130printk("\n[%s] thread[%x,%x] enter to initalialize process %x / cycle %d\n",
131__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]132#endif
[428]133
[610]134    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]135        process->pid        = pid;
136    process->ref_xp     = XPTR( local_cxy , process );
[443]137    process->owner_xp   = XPTR( local_cxy , process );
[433]138    process->parent_xp  = parent_xp;
139    process->term_state = 0;
[428]140
[610]141    // initialize VFS root inode and CWD inode
142    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
143    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
144
[409]145    // initialize vmm as empty
[415]146    error = vmm_init( process );
[564]147
148assert( (error == 0) , "cannot initialize VMM\n" );
[415]149 
[438]150#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]151cycle = (uint32_t)hal_get_cycles();
[610]152if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
153printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 
154__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]155#endif
[1]156
[409]157    // initialize fd_array as empty
[408]158    process_fd_init( process );
[1]159
[428]160    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]161    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]162    {
[581]163        // select a TXT channel
164        if( pid == 1 )  txt_id = 0;                     // INIT
165        else            txt_id = process_txt_alloc();   // KSH
[428]166
[457]167        // attach process to TXT
[428]168        process_txt_attach( process , txt_id ); 
169
[457]170#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
171cycle = (uint32_t)hal_get_cycles();
[610]172if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
173printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
174__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]175#endif
[428]176        // build path to TXT_RX[i] and TXT_TX[i] chdevs
177        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
178        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
179
180        // create stdin pseudo file         
[610]181        error = vfs_open(  process->vfs_root_xp,
[428]182                           rx_path,
[610]183                           process_xp,
[408]184                           O_RDONLY, 
185                           0,                // FIXME chmod
186                           &stdin_xp, 
187                           &stdin_id );
[1]188
[564]189assert( (error == 0) , "cannot open stdin pseudo file" );
190assert( (stdin_id == 0) , "stdin index must be 0" );
[428]191
[440]192#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
193cycle = (uint32_t)hal_get_cycles();
[610]194if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
195printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
196__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]197#endif
198
[428]199        // create stdout pseudo file         
[610]200        error = vfs_open(  process->vfs_root_xp,
[428]201                           tx_path,
[610]202                           process_xp,
[408]203                           O_WRONLY, 
204                           0,                // FIXME chmod
205                           &stdout_xp, 
206                           &stdout_id );
[1]207
[492]208        assert( (error == 0) , "cannot open stdout pseudo file" );
209        assert( (stdout_id == 1) , "stdout index must be 1" );
[428]210
[440]211#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
212cycle = (uint32_t)hal_get_cycles();
[610]213if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
214printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
215__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]216#endif
217
[428]218        // create stderr pseudo file         
[610]219        error = vfs_open(  process->vfs_root_xp,
[428]220                           tx_path,
[610]221                           process_xp,
[408]222                           O_WRONLY, 
223                           0,                // FIXME chmod
224                           &stderr_xp, 
225                           &stderr_id );
[428]226
[492]227        assert( (error == 0) , "cannot open stderr pseudo file" );
228        assert( (stderr_id == 2) , "stderr index must be 2" );
[428]229
[440]230#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
231cycle = (uint32_t)hal_get_cycles();
[610]232if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
233printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
234__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]235#endif
236
[408]237    }
[428]238    else                                            // normal user process
[408]239    {
[457]240        // get extended pointer on stdin pseudo file in parent process
[564]241        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
[440]242
[457]243        // get extended pointer on parent process TXT chdev
[440]244        chdev_xp = chdev_from_file( file_xp );
[428]245 
246        // get cluster and local pointer on chdev
247        chdev_cxy = GET_CXY( chdev_xp );
[435]248        chdev_ptr = GET_PTR( chdev_xp );
[428]249 
[564]250        // get parent process TXT terminal index
251        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]252
[564]253        // attach child process to parent process TXT terminal
[428]254        process_txt_attach( process , txt_id ); 
[407]255
[457]256        // copy all open files from parent process fd_array to this process
[428]257        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]258                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]259    }
[407]260
[610]261    // initialize lock protecting CWD changes
262    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]263
[438]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]269#endif
[407]270
[408]271    // reset children list root
272    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
273    process->children_nr     = 0;
[564]274    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]275
[611]276    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]277    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
278    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
279    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
280    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[564]281    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]282
[611]283    // reset open directories root and lock
284    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
285    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );
286
[408]287    // register new process in the local cluster manager pref_tbl[]
288    lpid_t lpid = LPID_FROM_PID( pid );
289    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]290
[408]291    // register new process descriptor in local cluster manager local_list
292    cluster_process_local_link( process );
[407]293
[408]294    // register new process descriptor in local cluster manager copies_list
295    cluster_process_copies_link( process );
[172]296
[564]297    // initialize th_tbl[] array and associated fields
[1]298    uint32_t i;
[564]299
300    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]301        {
302        process->th_tbl[i] = NULL;
303    }
304    process->th_nr  = 0;
[564]305    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]306
[124]307        hal_fence();
[1]308
[438]309#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]310cycle = (uint32_t)hal_get_cycles();
[610]311if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
312printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
313__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]314#endif
[101]315
[428]316}  // process_reference_init()
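
/*
 * Illustrative call site (sketch only, mirroring process_make_fork() further
 * down in this file): the caller first allocates the descriptor and a PID in
 * the owner cluster, then initializes the reference process from its parent:
 *
 *     process_t * child = process_alloc();
 *     error = cluster_pid_alloc( child , &new_pid );
 *     process_reference_init( child , new_pid , parent_process_xp );
 */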
[204]317
[1]318/////////////////////////////////////////////////////
319error_t process_copy_init( process_t * local_process,
320                           xptr_t      reference_process_xp )
321{
[415]322    error_t error;
323
[23]324    // get reference process cluster and local pointer
325    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]326    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]327
[428]328    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]329    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
330    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]331    local_process->ref_xp     = reference_process_xp;
[443]332    local_process->owner_xp   = reference_process_xp;
[433]333    local_process->term_state = 0;
[407]334
[564]335#if DEBUG_PROCESS_COPY_INIT
[610]336thread_t * this = CURRENT_THREAD; 
[433]337uint32_t cycle = (uint32_t)hal_get_cycles();
[610]338if( DEBUG_PROCESS_COPY_INIT < cycle )
339printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
340__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]341#endif
[407]342
[564]343// check user process
344assert( (local_process->pid != 0), "PID cannot be 0" );
345
[172]346    // reset local process vmm
[415]347    error = vmm_init( local_process );
[492]348    assert( (error == 0) , "cannot initialize VMM\n");
[1]349
[172]350    // reset process file descriptors array
[23]351        process_fd_init( local_process );
[1]352
[610]353    // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]354    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
355    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]356    local_process->cwd_xp      = XPTR_NULL;
[1]357
358    // reset children list root (not used in a process descriptor copy)
359    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]360    local_process->children_nr   = 0;
[564]361    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
362                           LOCK_PROCESS_CHILDREN );
[1]363
[428]364    // reset children_list (not used in a process descriptor copy)
365    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]366
367    // reset semaphore / mutex / barrier / condvar list roots (not used in a process descriptor copy)
368    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]369    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
370    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
371    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]372
[564]373    // initialize th_tbl[] array and associated fields
[1]374    uint32_t i;
[564]375    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]376        {
377        local_process->th_tbl[i] = NULL;
378    }
379    local_process->th_nr  = 0;
[564]380    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]381
[564]382
[1]383    // register new process descriptor in local cluster manager local_list
384    cluster_process_local_link( local_process );
385
386    // register new process descriptor in owner cluster manager copies_list
387    cluster_process_copies_link( local_process );
388
[124]389        hal_fence();
[1]390
[438]391#if DEBUG_PROCESS_COPY_INIT
[433]392cycle = (uint32_t)hal_get_cycles();
[610]393if( DEBUG_PROCESS_COPY_INIT < cycle )
394printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
395__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]396#endif
[279]397
[1]398    return 0;
399
[204]400} // end process_copy_init()
401
[1]402///////////////////////////////////////////
403void process_destroy( process_t * process )
404{
[428]405    xptr_t      parent_xp;
406    process_t * parent_ptr;
407    cxy_t       parent_cxy;
408    xptr_t      children_lock_xp;
[446]409    xptr_t      children_nr_xp;
[1]410
[437]411    pid_t       pid = process->pid;
412
[593]413// check no more threads
414assert( (process->th_nr == 0) , "process %x in cluster %x contains threads", pid , local_cxy );
[428]415
[438]416#if DEBUG_PROCESS_DESTROY
[610]417thread_t * this = CURRENT_THREAD;
[433]418uint32_t cycle = (uint32_t)hal_get_cycles();
[610]419if( DEBUG_PROCESS_DESTROY < cycle )
420printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
421__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]422#endif
[428]423
[436]424    // remove process from local_list in local cluster manager
425    cluster_process_local_unlink( process );
[1]426
[436]427    // remove process from copies_list in owner cluster manager
428    cluster_process_copies_unlink( process );
[23]429
[450]430    // remove process from children_list
431    // and release PID if owner cluster
[437]432    if( CXY_FROM_PID( pid ) == local_cxy )
[428]433    {
434        // get pointers on parent process
435        parent_xp  = process->parent_xp;
436        parent_cxy = GET_CXY( parent_xp );
437        parent_ptr = GET_PTR( parent_xp );
438
439        // get extended pointer on children_lock in parent process
440        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
[446]441        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
[428]442
443        // remove process from children_list
[564]444        remote_queuelock_acquire( children_lock_xp );
[428]445        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
[446]446            hal_remote_atomic_add( children_nr_xp , -1 );
[564]447        remote_queuelock_release( children_lock_xp );
[450]448
[564]449        // release the process PID to cluster manager
450        cluster_pid_release( pid );
[428]451    }
452
[564]453    // FIXME close all open files and synchronize dirty [AG]
[23]454
[428]455    // decrease refcount for bin file, root file and cwd file
[337]456        if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
457        if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
[610]458        if( process->cwd_xp      != XPTR_NULL ) vfs_file_count_down( process->cwd_xp );
[1]459
460    // Destroy VMM
461    vmm_destroy( process );
462
[416]463    // release memory allocated to process descriptor
464    process_free( process );
[1]465
[438]466#if DEBUG_PROCESS_DESTROY
[433]467cycle = (uint32_t)hal_get_cycles();
[610]468if( DEBUG_PROCESS_DESTROY < cycle )
469printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
470__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]471#endif
[428]472
[407]473}  // end process_destroy()
474
[583]475///////////////////////////////////////////////////////////////////
[527]476const char * process_action_str( process_sigactions_t action_type )
[409]477{
[583]478    switch ( action_type )
479    {
480        case BLOCK_ALL_THREADS:   return "BLOCK";
481        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
482        case DELETE_ALL_THREADS:  return "DELETE";
483        default:                  return "undefined";
484    }
[409]485}
486
[435]487////////////////////////////////////////
488void process_sigaction( pid_t       pid,
[457]489                        uint32_t    type )
[409]490{
491    cxy_t              owner_cxy;         // owner cluster identifier
492    lpid_t             lpid;              // process index in owner cluster
493    cluster_t        * cluster;           // pointer on cluster manager
494    xptr_t             root_xp;           // extended pointer on root of copies
495    xptr_t             lock_xp;           // extended pointer on lock protecting copies
496    xptr_t             iter_xp;           // iterator on copies list
497    xptr_t             process_xp;        // extended pointer on process copy
498    cxy_t              process_cxy;       // process copy cluster identifier
[457]499    process_t        * process_ptr;       // local pointer on process copy
[436]500    reg_t              save_sr;           // for critical section
501    rpc_desc_t         rpc;               // shared RPC descriptor
[457]502    thread_t         * client;            // pointer on client thread
503    xptr_t             client_xp;         // extended pointer on client thread
504    process_t        * local;             // pointer on process copy in local cluster
505    uint32_t           remote_nr;         // number of remote process copies
[409]506
[457]507    client    = CURRENT_THREAD;
508    client_xp = XPTR( local_cxy , client );
509    local     = NULL;
510    remote_nr = 0;
[435]511
[583]512    // check calling thread can yield
513    thread_assert_can_yield( client , __FUNCTION__ );
[564]514
[438]515#if DEBUG_PROCESS_SIGACTION
[433]516uint32_t cycle = (uint32_t)hal_get_cycles();
[438]517if( DEBUG_PROCESS_SIGACTION < cycle )
[593]518printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
[583]519__FUNCTION__ , client->process->pid, client->trdid,
[457]520process_action_str( type ) , pid , cycle );
[433]521#endif
[409]522
[436]523    // get pointer on local cluster manager
[416]524    cluster = LOCAL_CLUSTER;
525
[409]526    // get owner cluster identifier and process lpid
[435]527    owner_cxy = CXY_FROM_PID( pid );
528    lpid      = LPID_FROM_PID( pid );
[409]529
[593]530    // get root of list of copies and lock from owner cluster
[436]531    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
532    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
[435]533
[583]534// check action type
535assert( ((type == DELETE_ALL_THREADS ) ||
536         (type == BLOCK_ALL_THREADS )  ||
537         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
[416]538             
[593]539    // This client thread sends parallel RPCs to all remote clusters containing
[564]540    // target process copies, waits for all responses, and then directly handles
541    // the threads in the local cluster, when required.
[457]542    // The client thread allocates a - shared - RPC descriptor on its stack,
543    // because all parallel, non-blocking server threads use the same input
544    // arguments and the same shared RPC response field.
[436]545
546    // mask IRQs
547    hal_disable_irq( &save_sr);
548
[457]549    // client thread blocks itself
550    thread_block( client_xp , THREAD_BLOCKED_RPC );
[436]551
552    // initialize shared RPC descriptor
[438]553    rpc.responses = 0;
554    rpc.blocking  = false;
555    rpc.index     = RPC_PROCESS_SIGACTION;
556    rpc.thread    = client;
557    rpc.lid       = client->core->lid;
[611]558    rpc.args[0]   = pid;
559    rpc.args[1]   = type;
[436]560
[611]561    // take the lock protecting process copies
562    remote_queuelock_acquire( lock_xp );
563
[457]564    // scan list of process copies
[409]565    XLIST_FOREACH( root_xp , iter_xp )
566    {
[457]567        // get extended pointers and cluster on process
[440]568        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
569        process_cxy = GET_CXY( process_xp );
[457]570        process_ptr = GET_PTR( process_xp );
[440]571
[593]572        if( process_cxy == local_cxy )    // process copy is local
[457]573        { 
574            local = process_ptr;
575        }
[593]576        else                              // process copy is remote
[457]577        {
578            // update number of remote process copies
579            remote_nr++;
580
581            // atomically increment responses counter
582            hal_atomic_add( (void *)&rpc.responses , 1 );
583
[438]584#if DEBUG_PROCESS_SIGACTION
585if( DEBUG_PROCESS_SIGACTION < cycle )
[593]586printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
[583]587__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
[433]588#endif
[457]589            // call RPC in target cluster
590            rpc_process_sigaction_client( process_cxy , &rpc );
591        }
592    }  // end list of copies
593
[409]594    // release the lock protecting process copies
[564]595    remote_queuelock_release( lock_xp );
[409]596
[436]597    // restore IRQs
598    hal_restore_irq( save_sr);
[409]599
[457]600    // - if there are remote process copies, the client thread deschedules
601    //   (it will be unblocked by the last RPC server thread).
602    // - if there are no remote copies, the client thread unblocks itself.
603    if( remote_nr )
604    {
605        sched_yield("blocked on rpc_process_sigaction");
606    } 
607    else
608    {
609        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
610    }
[409]611
[457]612    // handle the local process copy if required
613    if( local != NULL )
614    {
615
616#if DEBUG_PROCESS_SIGACTION
617if( DEBUG_PROCESS_SIGACTION < cycle )
[593]618printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
[583]619__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
[457]620#endif
621        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
[583]622        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
[457]623        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
624    }
625
[438]626#if DEBUG_PROCESS_SIGACTION
[433]627cycle = (uint32_t)hal_get_cycles();
[438]628if( DEBUG_PROCESS_SIGACTION < cycle )
[593]629printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
[583]630__FUNCTION__, client->process->pid, client->trdid,
[457]631process_action_str( type ), pid, cycle );
[433]632#endif
[416]633
[409]634}  // end process_sigaction()
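
/*
 * Illustrative usage (sketch only): process_make_exec() below relies on this
 * broadcast mechanism to delete, in all clusters, all threads of the calling
 * process but the main thread:
 *
 *     process_sigaction( pid , DELETE_ALL_THREADS );
 */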
635
[433]636/////////////////////////////////////////////////
[583]637void process_block_threads( process_t * process )
[1]638{
[409]639    thread_t          * target;         // pointer on target thread
[433]640    thread_t          * this;           // pointer on calling thread
[564]641    uint32_t            ltid;           // index in process th_tbl[]
[436]642    cxy_t               owner_cxy;      // target process owner cluster
[409]643    uint32_t            count;          // requests counter
[593]644    volatile uint32_t   ack_count;      // acknowledges counter
[1]645
[416]646    // get calling thread pointer
[433]647    this = CURRENT_THREAD;
[407]648
[438]649#if DEBUG_PROCESS_SIGACTION
[564]650pid_t pid = process->pid;
[433]651uint32_t cycle = (uint32_t)hal_get_cycles();
[438]652if( DEBUG_PROCESS_SIGACTION < cycle )
[593]653printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]654__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]655#endif
[409]656
[564]657// check target process is a user process
[583]658assert( (LPID_FROM_PID( process->pid ) != 0 ), "target process must be a user process" );
[564]659
[610]660    // get target process owner cluster
[564]661    owner_cxy = CXY_FROM_PID( process->pid );
662
[409]663    // get lock protecting process th_tbl[]
[564]664    rwlock_rd_acquire( &process->th_lock );
[1]665
[440]666    // loop on target process local threads
[409]667    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[593]668    // - if the calling thread and the target thread are not running on the same
669    //   core, we ask the target scheduler to acknowledge the blocking,
670    //   to be sure that the target thread is no longer running.
671    // - if the calling thread and the target thread are running on the same core,
672    //   we don't need confirmation from the scheduler.
673           
[436]674    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]675    {
[409]676        target = process->th_tbl[ltid];
[1]677
[436]678        if( target != NULL )                                 // thread exist
[1]679        {
680            count++;
[409]681
[583]682            // set the global blocked bit in target thread descriptor.
683            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]684 
[583]685            if( this->core->lid != target->core->lid )
686            {
687                // increment responses counter
688                hal_atomic_add( (void*)&ack_count , 1 );
[409]689
[583]690                // set FLAG_REQ_ACK and the &ack_count pointer in target descriptor
691                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]692
[583]693                // force scheduling on target thread
694                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]695            }
[1]696        }
[172]697    }
698
[428]699    // release lock protecting process th_tbl[]
[564]700    rwlock_rd_release( &process->th_lock );
[416]701
[593]702    // wait for the other threads' acknowledges  TODO this could be improved...
[409]703    while( 1 )
704    {
[610]705        // exit when all scheduler acknowledges have been received
[436]706        if ( ack_count == 0 ) break;
[409]707   
708        // wait 1000 cycles before retry
709        hal_fixed_delay( 1000 );
710    }
[1]711
[438]712#if DEBUG_PROCESS_SIGACTION
[433]713cycle = (uint32_t)hal_get_cycles();
[438]714if( DEBUG_PROCESS_SIGACTION < cycle )
[593]715printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
716__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]717#endif
[409]718
[428]719}  // end process_block_threads()
[409]720
[440]721/////////////////////////////////////////////////
722void process_delete_threads( process_t * process,
723                             xptr_t      client_xp )
[409]724{
[433]725    thread_t          * this;          // pointer on calling thread
[440]726    thread_t          * target;        // local pointer on target thread
727    xptr_t              target_xp;     // extended pointer on target thread
728    cxy_t               owner_cxy;     // owner process cluster
[409]729    uint32_t            ltid;          // index in process th_tbl
[440]730    uint32_t            count;         // threads counter
[409]731
[433]732    // get calling thread pointer
733    this = CURRENT_THREAD;
[409]734
[440]735    // get target process owner cluster
736    owner_cxy = CXY_FROM_PID( process->pid );
737
[438]738#if DEBUG_PROCESS_SIGACTION
[433]739uint32_t cycle = (uint32_t)hal_get_cycles();
[438]740if( DEBUG_PROCESS_SIGACTION < cycle )
[593]741printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
[583]742__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
[433]743#endif
744
[564]745// check target process is a user process
[593]746assert( (LPID_FROM_PID( process->pid ) != 0), "process %x is not a user process", process->pid );
[564]747
[409]748    // get lock protecting process th_tbl[]
[583]749    rwlock_wr_acquire( &process->th_lock );
[409]750
[440]751    // loop on target process local threads                       
[416]752    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]753    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]754    {
[409]755        target = process->th_tbl[ltid];
[1]756
[440]757        if( target != NULL )    // valid thread 
[1]758        {
[416]759            count++;
[440]760            target_xp = XPTR( local_cxy , target );
[1]761
[564]762            // main thread and client thread should not be deleted
[440]763            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
764                (client_xp) != target_xp )                           // not client thread
765            {
766                // mark target thread for delete and block it
767                thread_delete( target_xp , process->pid , false );   // not forced
768            }
[409]769        }
770    }
[1]771
[428]772    // release lock protecting process th_tbl[]
[583]773    rwlock_wr_release( &process->th_lock );
[407]774
[438]775#if DEBUG_PROCESS_SIGACTION
[433]776cycle = (uint32_t)hal_get_cycles();
[438]777if( DEBUG_PROCESS_SIGACTION < cycle )
[593]778printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
779__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]780#endif
[407]781
[440]782}  // end process_delete_threads()
[409]783
[440]784///////////////////////////////////////////////////
785void process_unblock_threads( process_t * process )
[409]786{
[440]787    thread_t          * target;        // pointer on target thread
788    thread_t          * this;          // pointer on calling thread
[409]789    uint32_t            ltid;          // index in process th_tbl
[440]790    uint32_t            count;         // requests counter
[409]791
[440]792    // get calling thread pointer
793    this = CURRENT_THREAD;
794
[438]795#if DEBUG_PROCESS_SIGACTION
[564]796pid_t pid = process->pid;
[433]797uint32_t cycle = (uint32_t)hal_get_cycles();
[438]798if( DEBUG_PROCESS_SIGACTION < cycle )
[593]799printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]800__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]801#endif
802
[564]803// check target process is a user process
804assert( ( process->pid != 0 ),
805"target process must be a user process" );
806
[416]807    // get lock protecting process th_tbl[]
[564]808    rwlock_rd_acquire( &process->th_lock );
[416]809
[440]810    // loop on process threads to unblock all threads
[416]811    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]812    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]813    {
[416]814        target = process->th_tbl[ltid];
[409]815
[440]816        if( target != NULL )             // thread found
[409]817        {
818            count++;
[440]819
820            // reset the global blocked bit in target thread descriptor.
821            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]822        }
823    }
824
[428]825    // release lock protecting process th_tbl[]
[564]826    rwlock_rd_release( &process->th_lock );
[407]827
[438]828#if DEBUG_PROCESS_SIGACTION
[433]829cycle = (uint32_t)hal_get_cycles();
[438]830if( DEBUG_PROCESS_SIGACTION < cycle )
[593]831printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]832__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]833#endif
[1]834
[440]835}  // end process_unblock_threads()
[407]836
[1]837///////////////////////////////////////////////
838process_t * process_get_local_copy( pid_t pid )
839{
840    error_t        error;
[172]841    process_t    * process_ptr;   // local pointer on process
[23]842    xptr_t         process_xp;    // extended pointer on process
[1]843
844    cluster_t * cluster = LOCAL_CLUSTER;
845
[564]846#if DEBUG_PROCESS_GET_LOCAL_COPY
847thread_t * this = CURRENT_THREAD;
848uint32_t cycle = (uint32_t)hal_get_cycles();
849if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]850printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]851__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]852#endif
853
[1]854    // get lock protecting local list of processes
[564]855    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]856
857    // scan the local list of process descriptors to find the process
[23]858    xptr_t  iter;
859    bool_t  found = false;
860    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]861    {
[23]862        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]863        process_ptr = GET_PTR( process_xp );
[23]864        if( process_ptr->pid == pid )
[1]865        {
866            found = true;
867            break;
868        }
869    }
870
871    // release lock protecting local list of processes
[564]872    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]873
[172]874    // allocate memory for a new local process descriptor
[440]875    // and initialize it from the reference cluster if not found
[1]876    if( !found )
877    {
878        // get extended pointer on reference process descriptor
[23]879        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]880
[492]881        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]882
[1]883        // allocate memory for local process descriptor
[23]884        process_ptr = process_alloc();
[443]885
[23]886        if( process_ptr == NULL )  return NULL;
[1]887
888        // initialize local process descriptor copy
[23]889        error = process_copy_init( process_ptr , ref_xp );
[443]890
[1]891        if( error ) return NULL;
892    }
893
[440]894#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]895cycle = (uint32_t)hal_get_cycles();
[440]896if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]897printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]898__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]899#endif
900
[23]901    return process_ptr;
[1]902
[409]903}  // end process_get_local_copy()
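
/*
 * Illustrative usage (sketch only, the calling context is an assumption): a
 * server thread that only receives a PID can obtain - or create on demand -
 * the process descriptor copy for its own cluster with:
 *
 *     process_t * process = process_get_local_copy( pid );
 *     if( process == NULL ) return;   // allocation or copy_init failure
 */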
904
[436]905////////////////////////////////////////////
906pid_t process_get_ppid( xptr_t  process_xp )
907{
908    cxy_t       process_cxy;
909    process_t * process_ptr;
910    xptr_t      parent_xp;
911    cxy_t       parent_cxy;
912    process_t * parent_ptr;
913
914    // get process cluster and local pointer
915    process_cxy = GET_CXY( process_xp );
916    process_ptr = GET_PTR( process_xp );
917
918    // get pointers on parent process
[564]919    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]920    parent_cxy = GET_CXY( parent_xp );
921    parent_ptr = GET_PTR( parent_xp );
922
[564]923    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]924}
925
[1]926//////////////////////////////////////////////////////////////////////////////////////////
927// File descriptor array related functions
928//////////////////////////////////////////////////////////////////////////////////////////
929
930///////////////////////////////////////////
931void process_fd_init( process_t * process )
932{
933    uint32_t fd;
934
[610]935    // initialize lock
[564]936    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]937
[610]938    // initialize number of open files
[23]939    process->fd_array.current = 0;
940
[1]941    // initialize array
[23]942    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]943    {
944        process->fd_array.array[fd] = XPTR_NULL;
945    }
946}
[610]947////////////////////////////////////////////////////
948error_t process_fd_register( xptr_t      process_xp,
[407]949                             xptr_t      file_xp,
950                             uint32_t  * fdid )
[1]951{
952    bool_t    found;
[23]953    uint32_t  id;
954    xptr_t    xp;
[1]955
[23]956    // get reference process cluster and local pointer
[610]957    process_t * process_ptr = GET_PTR( process_xp );
958    cxy_t       process_cxy = GET_CXY( process_xp );
[23]959
[610]960// check client process is reference process
961assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
962"client process must be reference process\n" );
963
964#if DEBUG_PROCESS_FD_REGISTER
965thread_t * this  = CURRENT_THREAD;
966uint32_t   cycle = (uint32_t)hal_get_cycles();
967pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
968if( DEBUG_PROCESS_FD_REGISTER < cycle )
969printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
970__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
971#endif
972
973    // build extended pointer on lock protecting reference fd_array
974    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
975
[23]976    // take lock protecting reference fd_array
[610]977        remote_queuelock_acquire( lock_xp );
[23]978
[1]979    found   = false;
980
[23]981    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]982    {
[610]983        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]984        if ( xp == XPTR_NULL )
[1]985        {
[564]986            // update reference fd_array
[610]987            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
988                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]989
990            // exit
991                        *fdid = id;
[1]992            found = true;
993            break;
994        }
995    }
996
[610]997    // release lock protecting fd_array
998        remote_queuelock_release( lock_xp );
[1]999
[610]1000#if DEBUG_PROCESS_FD_REGISTER
1001cycle = (uint32_t)hal_get_cycles();
1002if( DEBUG_PROCESS_FD_REGISTER < cycle )
1003printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1004__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1005#endif
1006
[428]1007    if ( !found ) return -1;
[1]1008    else          return 0;
1009
[610]1010}  // end process_fd_register()
1011
[172]1012////////////////////////////////////////////////
[23]1013xptr_t process_fd_get_xptr( process_t * process,
[407]1014                            uint32_t    fdid )
[1]1015{
[23]1016    xptr_t  file_xp;
[564]1017    xptr_t  lock_xp;
[1]1018
[23]1019    // access local copy of process descriptor
[407]1020    file_xp = process->fd_array.array[fdid];
[1]1021
[23]1022    if( file_xp == XPTR_NULL )
1023    {
1024        // get reference process cluster and local pointer
1025        xptr_t      ref_xp  = process->ref_xp;
1026        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1027        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1028
[564]1029        // build extended pointer on lock protecting reference fd_array
1030        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1031
1032        // take lock protecting reference fd_array
1033            remote_queuelock_acquire( lock_xp );
1034
[23]1035        // access reference process descriptor
[564]1036        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1037
[23]1038        // update local fd_array if found
[564]1039        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1040       
1041        // release lock protecting reference fd_array
1042            remote_queuelock_release( lock_xp );
[23]1043    }
[1]1044
[23]1045    return file_xp;
[1]1046
[407]1047}  // end process_fd_get_xptr()
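
/*
 * Illustrative usage (sketch only, the syscall context is an assumption): a
 * read()/write() syscall handler would typically translate the user-level
 * file index into an extended pointer on the vfs_file_t descriptor:
 *
 *     xptr_t file_xp = process_fd_get_xptr( CURRENT_THREAD->process , fdid );
 *     if( file_xp == XPTR_NULL ) return -1;   // undefined file descriptor
 */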
1048
[1]1049///////////////////////////////////////////
1050void process_fd_remote_copy( xptr_t dst_xp,
1051                             xptr_t src_xp )
1052{
1053    uint32_t fd;
1054    xptr_t   entry;
1055
1056    // get cluster and local pointer for src fd_array
1057    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1058    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1059
1060    // get cluster and local pointer for dst fd_array
1061    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1062    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1063
1064    // get the remote lock protecting the src fd_array
[564]1065        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1066
[428]1067    // loop on all fd_array entries
1068    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1069        {
[564]1070                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1071
1072                if( entry != XPTR_NULL )
1073                {
[459]1074            // increment file descriptor refcount
[1]1075            vfs_file_count_up( entry );
1076
1077                        // copy entry in destination process fd_array
[564]1078                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1079                }
1080        }
1081
1082    // release lock on source process fd_array
[564]1083        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1084
[407]1085}  // end process_fd_remote_copy()
1086
[564]1087
1088////////////////////////////////////
1089bool_t process_fd_array_full( void )
1090{
1091    // get extended pointer on reference process
1092    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1093
1094    // get reference process cluster and local pointer
1095    process_t * ref_ptr = GET_PTR( ref_xp );
1096    cxy_t       ref_cxy = GET_CXY( ref_xp );
1097
1098    // get number of open file descriptors from reference fd_array
1099    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1100
1101        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1102}
1103
1104
[1]1105////////////////////////////////////////////////////////////////////////////////////
1106//  Thread related functions
1107////////////////////////////////////////////////////////////////////////////////////
1108
1109/////////////////////////////////////////////////////
1110error_t process_register_thread( process_t * process,
1111                                 thread_t  * thread,
1112                                 trdid_t   * trdid )
1113{
[472]1114    ltid_t         ltid;
1115    bool_t         found = false;
1116 
[564]1117// check arguments
1118assert( (process != NULL) , "process argument is NULL" );
1119assert( (thread != NULL) , "thread argument is NULL" );
[1]1120
[564]1121    // take the lock protecting th_tbl[] for all threads, except for the
1122    // idle thread executing kernel_init (which cannot yield)
1123    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1124
[583]1125    // scan th_tbl
[564]1126    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1127    {
1128        if( process->th_tbl[ltid] == NULL )
1129        {
1130            found = true;
1131            break;
1132        }
1133    }
1134
1135    if( found )
1136    {
1137        // register thread in th_tbl[]
1138        process->th_tbl[ltid] = thread;
1139        process->th_nr++;
1140
1141        // returns trdid
1142        *trdid = TRDID( local_cxy , ltid );
1143    }
1144
[583]1145    // release the lock protecting th_tbl
[564]1146    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1147
[564]1148    return (found) ? 0 : 0xFFFFFFFF;
[204]1149
1150}  // end process_register_thread()
1151
[443]1152/////////////////////////////////////////////////
1153bool_t process_remove_thread( thread_t * thread )
[1]1154{
[443]1155    uint32_t count;  // number of threads in local process descriptor
1156
[1]1157    process_t * process = thread->process;
1158
1159    // get thread local index
1160    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1161   
1162    // get the lock protecting th_tbl[]
1163    rwlock_wr_acquire( &process->th_lock );
[428]1164
[583]1165    // get number of threads
[443]1166    count = process->th_nr;
[428]1167
[583]1168// check thread
1169assert( (thread != NULL) , "thread argument is NULL" );
1170
[564]1171// check th_nr value
[583]1172assert( (count > 0) , "process th_nr cannot be 0\n" );
[443]1173
[1]1174    // remove thread from th_tbl[]
1175    process->th_tbl[ltid] = NULL;
[450]1176    process->th_nr = count-1;
[1]1177
[583]1178    // release lock protecting th_tbl
[564]1179    rwlock_wr_release( &process->th_lock );
[428]1180
[443]1181    return (count == 1);
1182
[450]1183}  // end process_remove_thread()
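
/*
 * Illustrative usage (sketch only, the caller behaviour is an assumption):
 * the boolean returned by process_remove_thread() indicates that the removed
 * thread was the last one registered in this cluster, so a caller can use it
 * to trigger the destruction of the local process descriptor:
 *
 *     if( process_remove_thread( thread ) ) process_destroy( process );
 */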
[204]1184
[408]1185/////////////////////////////////////////////////////////
1186error_t process_make_fork( xptr_t      parent_process_xp,
1187                           xptr_t      parent_thread_xp,
1188                           pid_t     * child_pid,
1189                           thread_t ** child_thread )
[1]1190{
[408]1191    process_t * process;         // local pointer on child process descriptor
1192    thread_t  * thread;          // local pointer on child thread descriptor
1193    pid_t       new_pid;         // process identifier for child process
1194    pid_t       parent_pid;      // process identifier for parent process
1195    xptr_t      ref_xp;          // extended pointer on reference process
[428]1196    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1197    error_t     error;
[1]1198
[408]1199    // get cluster and local pointer for parent process
1200    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1201    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1202
[428]1203    // get parent process PID and extended pointer on .elf file
[564]1204    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1205    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1206
[564]1207    // get extended pointer on reference process
1208    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1209
[564]1210// check parent process is the reference process
1211assert( (parent_process_xp == ref_xp ) ,
1212"parent process must be the reference process\n" );
[407]1213
[438]1214#if DEBUG_PROCESS_MAKE_FORK
[583]1215uint32_t cycle   = (uint32_t)hal_get_cycles();
1216thread_t * this  = CURRENT_THREAD;
1217trdid_t    trdid = this->trdid;
1218pid_t      pid   = this->process->pid;
[438]1219if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1220printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1221__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1222#endif
[172]1223
[408]1224    // allocate a process descriptor
1225    process = process_alloc();
1226    if( process == NULL )
1227    {
1228        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1229        __FUNCTION__, local_cxy ); 
1230        return -1;
1231    }
[1]1232
[408]1233    // allocate a child PID from local cluster
[416]1234    error = cluster_pid_alloc( process , &new_pid );
[428]1235    if( error ) 
[1]1236    {
[408]1237        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1238        __FUNCTION__, local_cxy ); 
1239        process_free( process );
1240        return -1;
[1]1241    }
[408]1242
[469]1243#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1244cycle = (uint32_t)hal_get_cycles();
1245if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1246printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
[583]1247__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1248#endif
1249
[408]1250    // initialize child process descriptor from parent process descriptor
1251    process_reference_init( process,
1252                            new_pid,
1253                            parent_process_xp );
1254
[438]1255#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1256cycle = (uint32_t)hal_get_cycles();
[438]1257if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1258printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1259__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1260#endif
[408]1261
[457]1262
[408]1263    // copy VMM from parent descriptor to child descriptor
1264    error = vmm_fork_copy( process,
1265                           parent_process_xp );
1266    if( error )
[101]1267    {
[408]1268        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1269        __FUNCTION__, local_cxy ); 
1270        process_free( process );
1271        cluster_pid_release( new_pid );
1272        return -1;
[101]1273    }
[172]1274
[438]1275#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1276cycle = (uint32_t)hal_get_cycles();
[438]1277if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1278printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1279__FUNCTION__, pid, trdid, cycle );
[433]1280#endif
[407]1281
[564]1282    // if parent_process is INIT, or if parent_process is the TXT owner,
1283    // the child_process becomes the owner of its TXT terminal
1284    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1285    {
1286        process_txt_set_ownership( XPTR( local_cxy , process ) );
1287
1288#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1289cycle = (uint32_t)hal_get_cycles();
1290if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1291printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1292__FUNCTION__ , pid, trdid, cycle );
[457]1293#endif
1294
1295    }
1296
[428]1297    // update extended pointer on .elf file
1298    process->vfs_bin_xp = vfs_bin_xp;
1299
[408]1300    // create child thread descriptor from parent thread descriptor
1301    error = thread_user_fork( parent_thread_xp,
1302                              process,
1303                              &thread );
1304    if( error )
1305    {
1306        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1307        __FUNCTION__, local_cxy ); 
1308        process_free( process );
1309        cluster_pid_release( new_pid );
1310        return -1;
1311    }
[172]1312
[564]1313// check main thread LTID
1314assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1315"main thread must have LTID == 0\n" );
[428]1316
[564]1317#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1318cycle = (uint32_t)hal_get_cycles();
[438]1319if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1320printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1321__FUNCTION__, pid, trdid, thread, cycle );
[433]1322#endif
[1]1323
[433]1324    // set Copy_On_Write flag in parent process GPT
[408]1325    // this includes all replicated GPT copies
1326    if( parent_process_cxy == local_cxy )   // reference is local
1327    {
1328        vmm_set_cow( parent_process_ptr );
1329    }
1330    else                                    // reference is remote
1331    {
1332        rpc_vmm_set_cow_client( parent_process_cxy,
1333                                parent_process_ptr );
1334    }
[1]1335
[433]1336    // set Copy_On_Write flag in child process GPT
1337    vmm_set_cow( process );
1338 
[438]1339#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1340cycle = (uint32_t)hal_get_cycles();
[438]1341if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1342printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1343__FUNCTION__, pid, trdid, cycle );
[433]1344#endif
[101]1345
[428]1346    // get extended pointers on parent children_root, children_lock and children_nr
1347    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1348    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1349    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1350
[428]1351    // register process in parent children list
[564]1352    remote_queuelock_acquire( children_lock_xp );
[428]1353        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1354        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1355    remote_queuelock_release( children_lock_xp );
[204]1356
[408]1357    // return success
1358    *child_thread = thread;
1359    *child_pid    = new_pid;
[1]1360
[438]1361#if DEBUG_PROCESS_MAKE_FORK
[433]1362cycle = (uint32_t)hal_get_cycles();
[438]1363if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1364printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1365__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1366#endif
[428]1367
[408]1368    return 0;
1369
[416]1370}   // end process_make_fork()
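
/*
 * Illustrative call site (sketch only, the fork() handler shown here is an
 * assumption, not defined in this file): a syscall handler running in the
 * selected target cluster would typically call:
 *
 *     pid_t      child_pid;
 *     thread_t * child_thread;
 *     error = process_make_fork( parent_process_xp,
 *                                parent_thread_xp,
 *                                &child_pid,
 *                                &child_thread );
 */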
[408]1371
1372/////////////////////////////////////////////////////
1373error_t process_make_exec( exec_info_t  * exec_info )
1374{
[457]1375    thread_t       * thread;                  // local pointer on this thread
1376    process_t      * process;                 // local pointer on this process
1377    pid_t            pid;                     // this process identifier
[610]1378    xptr_t           ref_xp;                  // extended pointer on reference process
[441]1379        error_t          error;                   // value returned by called functions
[457]1380    char           * path;                    // path to .elf file
1381    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1382    uint32_t         file_id;                 // file index in fd_array
1383    uint32_t         args_nr;                 // number of main thread arguments
1384    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1385
[610]1386    // get thread, process, pid and ref_xp
[457]1387    thread  = CURRENT_THREAD;
1388    process = thread->process;
1389    pid     = process->pid;
[610]1390    ref_xp  = process->ref_xp;
[408]1391
[457]1392        // get relevant infos from exec_info
1393        path          = exec_info->path;
1394    args_nr       = exec_info->args_nr;
1395    args_pointers = exec_info->args_pointers;
[408]1396
[438]1397#if DEBUG_PROCESS_MAKE_EXEC
[433]1398uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1399if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1400printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1401__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1402#endif
[408]1403
[457]1404    // open the file identified by <path>
1405    file_xp = XPTR_NULL;
[564]1406    file_id = 0xFFFFFFFF;
[610]1407        error   = vfs_open( process->vfs_root_xp,
[457]1408                            path,
[610]1409                        ref_xp,
[457]1410                            O_RDONLY,
1411                            0,
1412                            &file_xp,
1413                            &file_id );
1414        if( error )
1415        {
1416                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1417                return -1;
1418        }
1419
[446]1420#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1421cycle = (uint32_t)hal_get_cycles();
[446]1422if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1423printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1424__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1425#endif
1426
[457]1427    // delete all threads other than this main thread in all clusters
1428    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1429
[469]1430#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1431cycle = (uint32_t)hal_get_cycles();
1432if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1433printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
[583]1434__FUNCTION__, pid, thread->trdid, cycle );
[469]1435#endif
1436
[457]1437    // reset local process VMM
1438    vmm_destroy( process );
[446]1439
[457]1440#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1441cycle = (uint32_t)hal_get_cycles();
1442if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1443printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
[583]1444__FUNCTION__, pid, thread->trdid, cycle );
[457]1445#endif
[408]1446
[457]1447    // re-initialize the VMM (kentry/args/envs vsegs registration)
1448    error = vmm_init( process );
1449    if( error )
[416]1450    {
[457]1451        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1452        vfs_close( file_xp , file_id );
1453        // FIXME restore old process VMM
[416]1454        return -1;
1455    }
[457]1456   
[438]1457#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1458cycle = (uint32_t)hal_get_cycles();
[438]1459if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1460printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
[583]1461__FUNCTION__, pid, thread->trdid, cycle );
[433]1462#endif
[428]1463
[457]1464    // register code & data vsegs as well as entry-point in process VMM,
[428]1465    // and register extended pointer on .elf file in process descriptor
[457]1466        error = elf_load_process( file_xp , process );
[441]1467    if( error )
[1]1468        {
[441]1469                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1470        vfs_close( file_xp , file_id );
1471        // FIXME restore old process VMM
[408]1472        return -1;
[1]1473        }
1474
[438]1475#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1476cycle = (uint32_t)hal_get_cycles();
[438]1477if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1478printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
[583]1479__FUNCTION__, pid, thread->trdid, cycle );
[433]1480#endif
[1]1481
[457]1482    // update the existing main thread descriptor... and jump to user code
1483    error = thread_user_exec( (void *)process->vmm.entry_point,
1484                              args_nr,
1485                              args_pointers );
1486    if( error )
1487    {
[469]1488        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1489        vfs_close( file_xp , file_id );
1490        // FIXME restore old process VMM
[408]1491        return -1;
[457]1492    }
[1]1493
[492]1494    assert( false, "we should not execute this code");
[457]1495 
[409]1496        return 0;
1497
1498}  // end process_make_exec()
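
// Editor's sketch (not in the original source): a hedged illustration of how a caller
// (typically the exec() syscall handler) might fill the exec_info_t structure consumed
// above. Only the path / args_nr / args_pointers fields are taken from this function;
// the local variables are hypothetical, path is assumed to be a plain pointer field,
// and the real exec_info_t may contain additional fields that are ignored here.
#if 0
    exec_info_t  exec_info;

    exec_info.path          = elf_pathname;     // hypothetical kernel copy of the pathname
    exec_info.args_nr       = args_nr;          // hypothetical number of main thread arguments
    exec_info.args_pointers = args_pointers;    // hypothetical array of argument pointers

    if( process_make_exec( &exec_info ) )
    {
        printk("\n[ERROR] in sys_exec : cannot exec <%s>\n", elf_pathname );
        return -1;
    }
#endif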
1499
[457]1500
[428]1501///////////////////////////////////////////////
1502void process_zero_create( process_t * process )
1503{
[580]1504    error_t error;
1505    pid_t   pid;
[428]1506
[438]1507#if DEBUG_PROCESS_ZERO_CREATE
[433]1508uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1509if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1510printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1511__FUNCTION__, local_cxy, cycle );
[433]1512#endif
[428]1513
[580]1514    // get PID from local cluster manager for this kernel process
1515    error = cluster_pid_alloc( process , &pid );
1516
1517    if( error || (LPID_FROM_PID( pid ) != 0) )
1518    {
1519        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1520        __FUNCTION__ , local_cxy, pid );
1521        hal_core_sleep();
1522    }
1523
[428]1524    // initialize PID, REF_XP, OWNER_XP, PARENT_XP, and STATE
[580]1525    // the kernel process_zero is its own parent_process,
1526    // reference_process, and owner_process, and cannot be killed...
1527    process->pid        = pid;
[433]1528    process->ref_xp     = XPTR( local_cxy , process );
[443]1529    process->owner_xp   = XPTR( local_cxy , process );
[580]1530    process->parent_xp  = XPTR( local_cxy , process );
[433]1531    process->term_state = 0;
[428]1532
[564]1533    // reset th_tbl[] array and associated fields
[428]1534    uint32_t i;
[564]1535    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1536        {
1537        process->th_tbl[i] = NULL;
1538    }
1539    process->th_nr  = 0;
[564]1540    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1541
[428]1543    // reset children list as empty
1544    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1545    process->children_nr = 0;
[564]1546    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1547                           LOCK_PROCESS_CHILDREN );
[428]1548
[580]1549    // register kernel process in cluster manager local_list
1550    cluster_process_local_link( process );
1551   
[428]1552        hal_fence();
1553
[438]1554#if DEBUG_PROCESS_ZERO_CREATE
[433]1555cycle = (uint32_t)hal_get_cycles();
[438]1556if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1557printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1558__FUNCTION__, local_cxy, cycle );
[433]1559#endif
[428]1560
[610]1561}  // end process_zero_create()
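
// Editor's note (not in the original source): the kernel process descriptor process_zero,
// referenced elsewhere in this file, is expected to be initialized once at boot time with
// this function; the exact calling site (typically kernel_init) is an assumption.
#if 0
    process_zero_create( &process_zero );
#endif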
[428]1562
[564]1563////////////////////////////////
[485]1564void process_init_create( void )
[1]1565{
[428]1566    process_t      * process;       // local pointer on process descriptor
[409]1567    pid_t            pid;           // process_init identifier
1568    thread_t       * thread;        // local pointer on main thread
1569    pthread_attr_t   attr;          // main thread attributes
1570    lid_t            lid;           // selected core local index for main thread
[457]1571    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1572    uint32_t         file_id;       // file index in fd_array
[409]1573    error_t          error;
[1]1574
[438]1575#if DEBUG_PROCESS_INIT_CREATE
[610]1576thread_t * this = CURRENT_THREAD;
[433]1577uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1578if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1579printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1580__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1581#endif
[1]1582
[408]1583    // allocates memory for process descriptor from local cluster
1584        process = process_alloc(); 
[457]1585       
[564]1586// check memory allocator
1587assert( (process != NULL),
1588"no memory for process descriptor in cluster %x\n", local_cxy  );
[101]1589
[610]1590    // set the CWD and VFS_ROOT fields in process descriptor
1591    process->cwd_xp      = process_zero.vfs_root_xp;
1592    process->vfs_root_xp = process_zero.vfs_root_xp;
1593
[409]1594    // get PID from local cluster
[416]1595    error = cluster_pid_alloc( process , &pid );
[408]1596
[564]1597// check PID allocator
1598assert( (error == 0),
1599"cannot allocate PID in cluster %x\n", local_cxy );
[409]1600
[564]1601// check PID value
1602assert( (pid == 1) ,
1603"process INIT must be first process in cluster 0\n" );
[457]1604
[409]1605    // initialize process descriptor / parent is local process_zero
1606    process_reference_init( process,
[408]1607                            pid,
[457]1608                            XPTR( local_cxy , &process_zero ) ); 
[408]1609
[564]1610#if(DEBUG_PROCESS_INIT_CREATE & 1)
1611if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1612printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1613__FUNCTION__, this->process->pid, this->trdid );
[564]1614#endif
1615
[457]1616    // open the file identified by CONFIG_PROCESS_INIT_PATH
1617    file_xp = XPTR_NULL;
1618    file_id = -1;
[610]1619        error   = vfs_open( process->vfs_root_xp,
[457]1620                            CONFIG_PROCESS_INIT_PATH,
[610]1621                        XPTR( local_cxy , process ),
[457]1622                            O_RDONLY,
1623                            0,
1624                            &file_xp,
1625                            &file_id );
1626
[564]1627assert( (error == 0),
1628"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1629
[564]1630#if(DEBUG_PROCESS_INIT_CREATE & 1)
1631if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1632printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
1633__FUNCTION__, this->process->pid, this->trdid );
[564]1634#endif
1635
1636    // register "code" and "data" vsegs as well as entry-point
[409]1637    // in process VMM, using information contained in the elf file.
[457]1638        error = elf_load_process( file_xp , process );
[101]1639
[564]1640assert( (error == 0),
1641"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1642
[564]1643#if(DEBUG_PROCESS_INIT_CREATE & 1)
1644if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1645printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1646__FUNCTION__, this->process->pid, this->trdid );
[564]1647#endif
1648
[428]1649    // get extended pointers on process_zero children_root, children_lock
1650    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1651    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1652
[564]1653    // take lock protecting kernel process children list
1654    remote_queuelock_acquire( children_lock_xp );
1655
[428]1656    // register process INIT in parent local process_zero
1657        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1658        hal_atomic_add( &process_zero.children_nr , 1 );
1659
[564]1660    // release lock protecting kernel process children list
1661    remote_queuelock_release( children_lock_xp );
1662
1663#if(DEBUG_PROCESS_INIT_CREATE & 1)
1664if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1665printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1666__FUNCTION__, this->process->pid, this->trdid );
[564]1667#endif
1668
[409]1669    // select a core in local cluster to execute the main thread
1670    lid  = cluster_select_local_core();
1671
1672    // initialize pthread attributes for main thread
1673    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1674    attr.cxy        = local_cxy;
1675    attr.lid        = lid;
1676
1677    // create and initialize thread descriptor
1678        error = thread_user_create( pid,
1679                                (void *)process->vmm.entry_point,
1680                                NULL,
1681                                &attr,
1682                                &thread );
[1]1683
[564]1684assert( (error == 0),
1685"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[428]1686
[564]1687assert( (thread->trdid == 0),
1688"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1689
[564]1690#if(DEBUG_PROCESS_INIT_CREATE & 1)
1691if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1692printk("\n[%s] thread[%x,%x] created main thread\n",
1693__FUNCTION__, this->process->pid, this->trdid );
[564]1694#endif
1695
[409]1696    // activate thread
1697        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1698
[124]1699    hal_fence();
[1]1700
[438]1701#if DEBUG_PROCESS_INIT_CREATE
[433]1702cycle = (uint32_t)hal_get_cycles();
[438]1703if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1704printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1705__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1706#endif
[409]1707
[204]1708}  // end process_init_create()
1709
[428]1710/////////////////////////////////////////
1711void process_display( xptr_t process_xp )
1712{
1713    process_t   * process_ptr;
1714    cxy_t         process_cxy;
[443]1715
[428]1716    xptr_t        parent_xp;       // extended pointer on parent process
1717    process_t   * parent_ptr;
1718    cxy_t         parent_cxy;
1719
[443]1720    xptr_t        owner_xp;        // extended pointer on owner process
1721    process_t   * owner_ptr;
1722    cxy_t         owner_cxy;
1723
[428]1724    pid_t         pid;
1725    pid_t         ppid;
[580]1726    lpid_t        lpid;
[428]1727    uint32_t      state;
1728    uint32_t      th_nr;
1729
[443]1730    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1731    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1732    chdev_t     * txt_chdev_ptr;
1733    cxy_t         txt_chdev_cxy;
1734    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1735
1736    xptr_t        elf_file_xp;     // extended pointer on .elf file
1737    cxy_t         elf_file_cxy;
1738    vfs_file_t  * elf_file_ptr;
1739    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1740
1741    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1742    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1743
1744    // get cluster and local pointer on process
1745    process_ptr = GET_PTR( process_xp );
1746    process_cxy = GET_CXY( process_xp );
1747
[580]1748    // get process PID, LPID, and state
[564]1749    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1750    lpid  = LPID_FROM_PID( pid );
[564]1751    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1752
[580]1753    // get process PPID
[564]1754    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1755    parent_cxy = GET_CXY( parent_xp );
1756    parent_ptr = GET_PTR( parent_xp );
[564]1757    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1758
1759    // get number of threads
[564]1760    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1761
[443]1762    // get pointers on owner process descriptor
[564]1763    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1764    owner_cxy = GET_CXY( owner_xp );
1765    owner_ptr = GET_PTR( owner_xp );
[428]1766
[580]1767    // get process TXT name and .elf name
1768    if( lpid )                                   // user process
1769    {
[443]1770
[580]1771        // get extended pointer on file descriptor associated to TXT_RX
1772        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1773
[580]1774        assert( (txt_file_xp != XPTR_NULL) ,
1775        "process must be attached to one TXT terminal\n" ); 
[443]1776
[580]1777        // get TXT_RX chdev pointers
1778        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1779        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1780        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1781
1782        // get TXT_RX name and ownership
1783        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1784                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1785   
[580]1786        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1787                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]1788
[580]1789        // get process .elf name
1790        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1791        elf_file_cxy  = GET_CXY( elf_file_xp );
1792        elf_file_ptr  = GET_PTR( elf_file_xp );
1793        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1794        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1795    }
1796    else                                         // kernel process_zero
1797    {
1798        // TXT name and .elf name are not registered in kernel process_zero
1799        strcpy( txt_name , "txt0_rx" );
1800        txt_owner_xp = process_xp; 
1801        strcpy( elf_name , "kernel.elf" );
1802    }
1803
[428]1804    // display process info
[443]1805    if( txt_owner_xp == process_xp )
[428]1806    {
[581]1807        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
1808        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1809    }
1810    else
1811    {
[581]1812        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
1813        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1814    }
1815}  // end process_display()
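
// Editor's sketch (not in the original source): process_display() uses nolock_printk(),
// so a caller is expected to hold the TXT0 lock, following the same pattern as
// process_txt_display() below; the calling context shown here is hypothetical.
#if 0
    // get pointers on TXT0 chdev and take its lock
    xptr_t    txt0_xp      = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy     = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr     = GET_PTR( txt0_xp );
    xptr_t    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    remote_busylock_acquire( txt0_lock_xp );

    // display the calling process
    process_display( XPTR( local_cxy , CURRENT_THREAD->process ) );

    remote_busylock_release( txt0_lock_xp );
#endif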
1816
1817
1818////////////////////////////////////////////////////////////////////////////////////////
1819//     Terminals related functions
1820////////////////////////////////////////////////////////////////////////////////////////
1821
[581]1822//////////////////////////////////
[485]1823uint32_t process_txt_alloc( void )
[428]1824{
1825    uint32_t  index;       // TXT terminal index
1826    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1827    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1828    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1829    xptr_t    root_xp;     // extended pointer on owner field in chdev
1830
1831    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1832    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1833    {
1834        // get pointers on TXT_RX[index]
1835        chdev_xp  = chdev_dir.txt_rx[index];
1836        chdev_cxy = GET_CXY( chdev_xp );
1837        chdev_ptr = GET_PTR( chdev_xp );
1838
1839        // get extended pointer on root of attached process
1840        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1841
1842        // return free TXT index if found
1843        if( xlist_is_empty( root_xp ) ) return index; 
1844    }
1845
[492]1846    assert( false , "no free TXT terminal found" );
[428]1847
1848    return -1;
1849
1850} // end process_txt_alloc()
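
// Editor's sketch (not in the original source): expected pairing of process_txt_alloc()
// and process_txt_attach() when a new user process is created in its owner cluster;
// the calling context and the <process> variable are assumptions.
#if 0
    uint32_t txt_id = process_txt_alloc();      // get a free TXT_RX terminal index
    process_txt_attach( process , txt_id );     // attach the new process to this terminal
#endif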
1851
1852/////////////////////////////////////////////
1853void process_txt_attach( process_t * process,
1854                         uint32_t    txt_id )
1855{
1856    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1857    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1858    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1859    xptr_t      root_xp;      // extended pointer on list root in chdev
1860    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1861
[564]1862// check process is in owner cluster
1863assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
1864"process descriptor not in owner cluster" );
[428]1865
[564]1866// check terminal index
1867assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1868"illegal TXT terminal index" );
[428]1869
1870    // get pointers on TXT_RX[txt_id] chdev
1871    chdev_xp  = chdev_dir.txt_rx[txt_id];
1872    chdev_cxy = GET_CXY( chdev_xp );
1873    chdev_ptr = GET_PTR( chdev_xp );
1874
1875    // get extended pointer on root & lock of attached process list
1876    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1877    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1878
[564]1879    // get lock protecting list of processes attached to TXT
1880    remote_busylock_acquire( lock_xp );
1881
[428]1882    // insert process in attached process list
1883    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1884
[564]1885    // release lock protecting list of processes attached to TXT
1886    remote_busylock_release( lock_xp );
1887
[446]1888#if DEBUG_PROCESS_TXT
[610]1889thread_t * this = CURRENT_THREAD;
[457]1890uint32_t cycle = (uint32_t)hal_get_cycles();
[446]1891if( DEBUG_PROCESS_TXT < cycle )
[610]1892printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
1893__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]1894#endif
[428]1895
1896} // end process_txt_attach()
1897
[436]1898/////////////////////////////////////////////
1899void process_txt_detach( xptr_t  process_xp )
[428]1900{
[436]1901    process_t * process_ptr;  // local pointer on process in owner cluster
1902    cxy_t       process_cxy;  // process owner cluster
1903    pid_t       process_pid;  // process identifier
1904    xptr_t      file_xp;      // extended pointer on stdin file
[428]1905    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1906    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1907    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1908    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1909
[436]1910    // get process cluster, local pointer, and PID
1911    process_cxy = GET_CXY( process_xp );
1912    process_ptr = GET_PTR( process_xp );
[564]1913    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1914
[564]1915// check process descriptor in owner cluster
1916assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
1917"process descriptor not in owner cluster" );
[436]1918
1919    // release TXT ownership (does nothing if not TXT owner)
1920    process_txt_transfer_ownership( process_xp );
[428]1921
[436]1922    // get extended pointer on process stdin file
[564]1923    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]1924
1925    // get pointers on TXT_RX chdev
1926    chdev_xp  = chdev_from_file( file_xp );
[428]1927    chdev_cxy = GET_CXY( chdev_xp );
1928    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1929
[436]1930    // get extended pointer on lock protecting attached process list
[428]1931    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1932
[564]1933    // get lock protecting list of processes attached to TXT
1934    remote_busylock_acquire( lock_xp );
1935
[428]1936    // unlink process from attached process list
[436]1937    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
1938
[564]1939    // release lock protecting list of processes attached to TXT
1940    remote_busylock_release( lock_xp );
1941
[446]1942#if DEBUG_PROCESS_TXT
[610]1943thread_t * this = CURRENT_THREAD;
[457]1944uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1945uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]1946if( DEBUG_PROCESS_TXT < cycle )
[610]1947printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
1948__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]1949#endif
[428]1950
1951} // end process_txt_detach()
1952
1953///////////////////////////////////////////////////
1954void process_txt_set_ownership( xptr_t process_xp )
1955{
1956    process_t * process_ptr;
1957    cxy_t       process_cxy;
[436]1958    pid_t       process_pid;
[428]1959    xptr_t      file_xp;
1960    xptr_t      txt_xp;     
1961    chdev_t   * txt_ptr;
1962    cxy_t       txt_cxy;
1963
[436]1964    // get pointers on process in owner cluster
[428]1965    process_cxy = GET_CXY( process_xp );
[435]1966    process_ptr = GET_PTR( process_xp );
[564]1967    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1968
1969    // check owner cluster
[492]1970    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]1971    "process descriptor not in owner cluster\n" );
1972
[428]1973    // get extended pointer on stdin pseudo file
[564]1974    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]1975
1976    // get pointers on TXT chdev
1977    txt_xp  = chdev_from_file( file_xp );
1978    txt_cxy = GET_CXY( txt_xp );
[435]1979    txt_ptr = GET_PTR( txt_xp );
[428]1980
1981    // set owner field in TXT chdev
[564]1982    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]1983
[446]1984#if DEBUG_PROCESS_TXT
[610]1985thread_t * this = CURRENT_THREAD;
[457]1986uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1987uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]1988if( DEBUG_PROCESS_TXT < cycle )
[610]1989printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
1990__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]1991#endif
1992
[428]1993}  // end process_txt_set_ownership()
1994
[436]1995////////////////////////////////////////////////////////
1996void process_txt_transfer_ownership( xptr_t process_xp )
[428]1997{
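    // Editor's note (not in the original source): summary of the transfer policy
    // implemented below. If the releasing process is not the KSH process (PPID != 1),
    // ownership goes back to the attached KSH; if the releasing process is the KSH
    // itself, ownership goes to any other attached process, or to nobody (XPTR_NULL)
    // when the KSH is the last process attached to this terminal.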
[436]1998    process_t * process_ptr;     // local pointer on process releasing ownership
1999    cxy_t       process_cxy;     // process cluster
2000    pid_t       process_pid;     // process identifier
[428]2001    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2002    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2003    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2004    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2005    uint32_t    txt_id;          // TXT_RX channel
[428]2006    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2007    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2008    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2009    xptr_t      iter_xp;         // iterator for xlist
2010    xptr_t      current_xp;      // extended pointer on current process
[433]2011    process_t * current_ptr;     // local pointer on current process
2012    cxy_t       current_cxy;     // cluster for current process
[428]2013
[457]2014#if DEBUG_PROCESS_TXT
[610]2015thread_t * this  = CURRENT_THREAD;
2016uint32_t   cycle;
[457]2017#endif
2018
[436]2019    // get pointers on process in owner cluster
[428]2020    process_cxy = GET_CXY( process_xp );
[435]2021    process_ptr = GET_PTR( process_xp );
[564]2022    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2023
2024    // check owner cluster
[492]2025    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2026    "process descriptor not in owner cluster\n" );
2027
[428]2028    // get extended pointer on stdin pseudo file
[564]2029    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2030
2031    // get pointers on TXT chdev
2032    txt_xp  = chdev_from_file( file_xp );
2033    txt_cxy = GET_CXY( txt_xp );
[433]2034    txt_ptr = GET_PTR( txt_xp );
[428]2035
[433]2036    // get extended pointer on TXT_RX owner and TXT channel
[564]2037    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2038    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2039
[436]2040    // transfer ownership only if process is the TXT owner
2041    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2042    {
[436]2043        // get extended pointers on root and lock of attached processes list
2044        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2045        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2046
[436]2047        // get lock
[564]2048        remote_busylock_acquire( lock_xp );
[436]2049
2050        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]2051        {
[436]2052            // scan attached process list to find KSH process
2053            XLIST_FOREACH( root_xp , iter_xp )
2054            {
2055                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2056                current_cxy = GET_CXY( current_xp );
2057                current_ptr = GET_PTR( current_xp );
[435]2058
[436]2059                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2060                {
2061                    // release lock
[564]2062                    remote_busylock_release( lock_xp );
[436]2063
2064                    // set owner field in TXT chdev
[564]2065                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2066
[446]2067#if DEBUG_PROCESS_TXT
[610]2068cycle = (uint32_t)hal_get_cycles();
[564]2069uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2070if( DEBUG_PROCESS_TXT < cycle )
[610]2071printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
2072__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
[457]2073process_txt_display( txt_id );
[436]2074#endif
2075                     return;
2076                }
2077            }
2078 
2079            // release lock
[564]2080            remote_busylock_release( lock_xp );
[436]2081
2082            // PANIC if KSH not found
[492]2083            assert( false , "KSH process not found for TXT %d" , txt_id );
[436]2084
2085            return;
2086        }
2087        else                                               // process is KSH
2088        {
2089            // scan attached process list to find another process
2090            XLIST_FOREACH( root_xp , iter_xp )
[428]2091            {
[436]2092                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2093                current_cxy = GET_CXY( current_xp );
2094                current_ptr = GET_PTR( current_xp );
2095
2096                if( current_xp != process_xp )            // current is not KSH
2097                {
2098                    // release lock
[564]2099                    remote_busylock_release( lock_xp );
[436]2100
2101                    // set owner field in TXT chdev
[564]2102                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2103
[446]2104#if DEBUG_PROCESS_TXT
[610]2105cycle  = (uint32_t)hal_get_cycles();
[564]2106uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2107if( DEBUG_PROCESS_TXT < cycle )
[610]2108printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
2109__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[457]2110process_txt_display( txt_id );
[436]2111#endif
2112                     return;
2113                }
[428]2114            }
[436]2115
2116            // release lock
[564]2117            remote_busylock_release( lock_xp );
[436]2118
2119            // no more owner for TXT if no other process found
[564]2120            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2121
[446]2122#if DEBUG_PROCESS_TXT
[436]2123cycle = (uint32_t)hal_get_cycles();
[446]2124if( DEBUG_PROCESS_TXT < cycle )
[610]2125printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
2126__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[457]2127process_txt_display( txt_id );
[436]2128#endif
2129            return;
[428]2130        }
[436]2131    }
2132    else
2133    {
[433]2134
[446]2135#if DEBUG_PROCESS_TXT
[436]2136cycle = (uint32_t)hal_get_cycles();
[446]2137if( DEBUG_PROCESS_TXT < cycle )
[593]2138printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
[610]2139__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
[457]2140process_txt_display( txt_id );
[436]2141#endif
2142
[428]2143    }
[436]2144}  // end process_txt_transfer_ownership()
[428]2145
2146
[564]2147////////////////////////////////////////////////
2148bool_t process_txt_is_owner( xptr_t process_xp )
[457]2149{
2150    // get local pointer and cluster of process in owner cluster
2151    cxy_t       process_cxy = GET_CXY( process_xp );
2152    process_t * process_ptr = GET_PTR( process_xp );
2153
[564]2154// check calling thread executes in target process owner cluster
2155pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2156assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2157"process descriptor not in owner cluster\n" );
[457]2158
2159    // get extended pointer on stdin pseudo file
[564]2160    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2161
2162    // get pointers on TXT chdev
2163    xptr_t    txt_xp  = chdev_from_file( file_xp );
2164    cxy_t     txt_cxy = GET_CXY( txt_xp );
2165    chdev_t * txt_ptr = GET_PTR( txt_xp );
2166
2167    // get extended pointer on TXT_RX owner process
[564]2168    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2169
2170    return (process_xp == owner_xp);
2171
2172}   // end process_txt_is_owner()
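
// Editor's sketch (not in the original source): a minimal foreground test using
// process_txt_is_owner(); the owner_xp field satisfies the owner-cluster assertion
// above, but the calling context is hypothetical.
#if 0
    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;

    if( process_txt_is_owner( owner_xp ) )
    {
        // the calling process is the foreground process on its TXT terminal
    }
#endif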
2173
[436]2174////////////////////////////////////////////////     
2175xptr_t process_txt_get_owner( uint32_t channel )
[435]2176{
2177    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2178    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2179    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2180
[564]2181    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2182
[457]2183}  // end process_txt_get_owner()
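
// Editor's sketch (not in the original source): retrieving the current foreground
// process attached to a user terminal; the channel value is arbitrary and the use
// made of the result is left to the caller.
#if 0
    xptr_t owner_xp = process_txt_get_owner( 1 );   // foreground process on TXT1

    if( owner_xp != XPTR_NULL )
    {
        // deliver terminal input or signals to this process
    }
#endif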
2184
[435]2185///////////////////////////////////////////
2186void process_txt_display( uint32_t txt_id )
2187{
2188    xptr_t      chdev_xp;
2189    cxy_t       chdev_cxy;
2190    chdev_t   * chdev_ptr;
2191    xptr_t      root_xp;
2192    xptr_t      lock_xp;
2193    xptr_t      current_xp;
2194    xptr_t      iter_xp;
[443]2195    cxy_t       txt0_cxy;
2196    chdev_t   * txt0_ptr;
2197    xptr_t      txt0_xp;
2198    xptr_t      txt0_lock_xp;
2199   
[435]2200    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2201    "illegal TXT terminal index" );
[435]2202
[443]2203    // get pointers on TXT0 chdev
2204    txt0_xp  = chdev_dir.txt_tx[0];
2205    txt0_cxy = GET_CXY( txt0_xp );
2206    txt0_ptr = GET_PTR( txt0_xp );
2207
2208    // get extended pointer on TXT0 lock
2209    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2210
[435]2211    // get pointers on TXT_RX[txt_id] chdev
2212    chdev_xp  = chdev_dir.txt_rx[txt_id];
2213    chdev_cxy = GET_CXY( chdev_xp );
2214    chdev_ptr = GET_PTR( chdev_xp );
2215
2216    // get extended pointer on root & lock of attached process list
2217    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2218    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2219
[443]2220    // get lock on attached process list
[564]2221    remote_busylock_acquire( lock_xp );
[443]2222
2223    // get TXT0 lock in busy waiting mode
[564]2224    remote_busylock_acquire( txt0_lock_xp );
[443]2225
[435]2226    // display header
[443]2227    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2228    txt_id , (uint32_t)hal_get_cycles() );
[435]2229
[436]2230    // scan attached process list
[435]2231    XLIST_FOREACH( root_xp , iter_xp )
2232    {
2233        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2234        process_display( current_xp );
2235    }
2236
[443]2237    // release TXT0 lock in busy waiting mode
[564]2238    remote_busylock_release( txt0_lock_xp );
[443]2239
2240    // release lock on attached process list
[564]2241    remote_busylock_release( lock_xp );
[435]2242
2243}  // end process_txt_display()