source: trunk/kernel/kern/process.c @ 618

Last change on this file since 618 was 618, checked in by alain, 5 years ago

fix a bug in process_destroy() : the vmm_destroy() must be called before
the PID release.

[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
[428]41#include <chdev.h>
[1]42#include <list.h>
[407]43#include <string.h>
[1]44#include <scheduler.h>
[564]45#include <busylock.h>
46#include <queuelock.h>
47#include <remote_queuelock.h>
48#include <rwlock.h>
49#include <remote_rwlock.h>
[1]50#include <dqdt.h>
51#include <cluster.h>
52#include <ppm.h>
53#include <boot_info.h>
54#include <process.h>
55#include <elf.h>
[23]56#include <syscalls.h>
[435]57#include <shared_syscalls.h>
[1]58
59//////////////////////////////////////////////////////////////////////////////////////////
60// Extern global variables
61//////////////////////////////////////////////////////////////////////////////////////////
62
[428]63extern process_t           process_zero;     // allocated in kernel_init.c
64extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]65
66//////////////////////////////////////////////////////////////////////////////////////////
67// Process initialisation related functions
68//////////////////////////////////////////////////////////////////////////////////////////
69
[583]70/////////////////////////////////
[503]71process_t * process_alloc( void )
[1]72{
73        kmem_req_t   req;
74
75    req.type  = KMEM_PROCESS;
76        req.size  = sizeof(process_t);
77        req.flags = AF_KERNEL;
78
79    return (process_t *)kmem_alloc( &req );
80}
81
82////////////////////////////////////////
83void process_free( process_t * process )
84{
85    kmem_req_t  req;
86
87        req.type = KMEM_PROCESS;
88        req.ptr  = process;
89        kmem_free( &req );
90}
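// Editor's note (not part of the original source): a minimal usage sketch of the
// process_alloc() / process_free() pair defined above. It only relies on calls
// already present in this file; the error path mirrors process_make_fork() below.
//
//     process_t * process = process_alloc();      // KMEM_PROCESS request
//     if( process == NULL ) return -1;            // allocation failure
//     /* ... initialize the descriptor ... */
//     process_free( process );                    // release it on an error path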
91
[101]92/////////////////////////////////////////////////
93void process_reference_init( process_t * process,
94                             pid_t       pid,
[457]95                             xptr_t      parent_xp )
[1]96{
[610]97    xptr_t      process_xp;
[428]98    cxy_t       parent_cxy;
99    process_t * parent_ptr;
[407]100    xptr_t      stdin_xp;
101    xptr_t      stdout_xp;
102    xptr_t      stderr_xp;
103    uint32_t    stdin_id;
104    uint32_t    stdout_id;
105    uint32_t    stderr_id;
[415]106    error_t     error;
[428]107    uint32_t    txt_id;
108    char        rx_path[40];
109    char        tx_path[40];
[440]110    xptr_t      file_xp;
[428]111    xptr_t      chdev_xp;
112    chdev_t *   chdev_ptr;
113    cxy_t       chdev_cxy;
114    pid_t       parent_pid;
[1]115
[610]116    // build extended pointer on this reference process
117    process_xp = XPTR( local_cxy , process );
118
[428]119    // get parent process cluster and local pointer
120    parent_cxy = GET_CXY( parent_xp );
[435]121    parent_ptr = GET_PTR( parent_xp );
[204]122
[457]123    // get parent_pid
[564]124    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]125
[438]126#if DEBUG_PROCESS_REFERENCE_INIT
[610]127thread_t * this = CURRENT_THREAD;
[433]128uint32_t cycle = (uint32_t)hal_get_cycles();
[610]129if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
130printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
131__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]132#endif
[428]133
[610]134    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]135        process->pid        = pid;
136    process->ref_xp     = XPTR( local_cxy , process );
[443]137    process->owner_xp   = XPTR( local_cxy , process );
[433]138    process->parent_xp  = parent_xp;
139    process->term_state = 0;
[428]140
[610]141    // initialize VFS root inode and CWD inode
142    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
143    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
144
[409]145    // initialize vmm as empty
[415]146    error = vmm_init( process );
[564]147
148assert( (error == 0) , "cannot initialize VMM\n" );
[415]149 
[438]150#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]151cycle = (uint32_t)hal_get_cycles();
[610]152if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
153printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 
154__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]155#endif
[1]156
[409]157    // initialize fd_array as empty
[408]158    process_fd_init( process );
[1]159
[428]160    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]161    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]162    {
[581]163        // select a TXT channel
164        if( pid == 1 )  txt_id = 0;                     // INIT
165        else            txt_id = process_txt_alloc();   // KSH
[428]166
[457]167        // attach process to TXT
[428]168        process_txt_attach( process , txt_id ); 
169
[457]170#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
171cycle = (uint32_t)hal_get_cycles();
[610]172if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
173printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
174__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]175#endif
[428]176        // build path to TXT_RX[i] and TXT_TX[i] chdevs
177        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
178        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
179
180        // create stdin pseudo file         
[610]181        error = vfs_open(  process->vfs_root_xp,
[428]182                           rx_path,
[610]183                           process_xp,
[408]184                           O_RDONLY, 
185                           0,                // FIXME chmod
186                           &stdin_xp, 
187                           &stdin_id );
[1]188
[564]189assert( (error == 0) , "cannot open stdin pseudo file" );
190assert( (stdin_id == 0) , "stdin index must be 0" );
[428]191
[440]192#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
193cycle = (uint32_t)hal_get_cycles();
[610]194if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
195printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
196__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]197#endif
198
[428]199        // create stdout pseudo file         
[610]200        error = vfs_open(  process->vfs_root_xp,
[428]201                           tx_path,
[610]202                           process_xp,
[408]203                           O_WRONLY, 
204                           0,                // FIXME chmod
205                           &stdout_xp, 
206                           &stdout_id );
[1]207
[492]208        assert( (error == 0) , "cannot open stdout pseudo file" );
209        assert( (stdout_id == 1) , "stdout index must be 1" );
[428]210
[440]211#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
212cycle = (uint32_t)hal_get_cycles();
[610]213if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
214printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
215__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]216#endif
217
[428]218        // create stderr pseudo file         
[610]219        error = vfs_open(  process->vfs_root_xp,
[428]220                           tx_path,
[610]221                           process_xp,
[408]222                           O_WRONLY, 
223                           0,                // FIXME chmod
224                           &stderr_xp, 
225                           &stderr_id );
[428]226
[492]227        assert( (error == 0) , "cannot open stderr pseudo file" );
228        assert( (stderr_id == 2) , "stderr index must be 2" );
[428]229
[440]230#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
231cycle = (uint32_t)hal_get_cycles();
[610]232if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
233printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
234__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]235#endif
236
[408]237    }
[428]238    else                                            // normal user process
[408]239    {
[457]240        // get extended pointer on stdin pseudo file in parent process
[564]241        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
[440]242
[457]243        // get extended pointer on parent process TXT chdev
[440]244        chdev_xp = chdev_from_file( file_xp );
[428]245 
246        // get cluster and local pointer on chdev
247        chdev_cxy = GET_CXY( chdev_xp );
[435]248        chdev_ptr = GET_PTR( chdev_xp );
[428]249 
[564]250        // get parent process TXT terminal index
251        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]252
[564]253        // attach child process to parent process TXT terminal
[428]254        process_txt_attach( process , txt_id ); 
[407]255
[457]256        // copy all open files from parent process fd_array to this process
[428]257        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]258                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]259    }
[407]260
[610]261    // initialize lock protecting CWD changes
262    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]263
[438]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]269#endif
[407]270
[408]271    // reset children list root
272    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
273    process->children_nr     = 0;
[564]274    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]275
[611]276    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]277    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
278    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
279    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
280    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[564]281    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]282
[611]283    // reset open directories root and lock
284    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
285    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );
286
[408]287    // register new process in the local cluster manager pref_tbl[]
288    lpid_t lpid = LPID_FROM_PID( pid );
289    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]290
[408]291    // register new process descriptor in local cluster manager local_list
292    cluster_process_local_link( process );
[407]293
[408]294    // register new process descriptor in local cluster manager copies_list
295    cluster_process_copies_link( process );
[172]296
[564]297    // initialize th_tbl[] array and associated threads
[1]298    uint32_t i;
[564]299
300    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]301        {
302        process->th_tbl[i] = NULL;
303    }
304    process->th_nr  = 0;
[564]305    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]306
[124]307        hal_fence();
[1]308
[438]309#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]310cycle = (uint32_t)hal_get_cycles();
[610]311if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
312printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
313__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]314#endif
[101]315
[428]316}  // process_reference_init()
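// Editor's note (not part of the original source): typical calling sequence for
// the function above, as used by process_make_fork() later in this file: the
// caller allocates the descriptor and a PID first, then initializes the
// reference process, releasing both resources on failure.
//
//     process = process_alloc();
//     if( process == NULL ) return -1;
//
//     if( cluster_pid_alloc( process , &new_pid ) )
//     {
//         process_free( process );
//         return -1;
//     }
//
//     process_reference_init( process , new_pid , parent_process_xp );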
[204]317
[1]318/////////////////////////////////////////////////////
319error_t process_copy_init( process_t * local_process,
320                           xptr_t      reference_process_xp )
321{
[415]322    error_t error;
323
[23]324    // get reference process cluster and local pointer
325    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]326    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]327
[428]328    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]329    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
330    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]331    local_process->ref_xp     = reference_process_xp;
[443]332    local_process->owner_xp   = reference_process_xp;
[433]333    local_process->term_state = 0;
[407]334
[564]335#if DEBUG_PROCESS_COPY_INIT
[610]336thread_t * this = CURRENT_THREAD; 
[433]337uint32_t cycle = (uint32_t)hal_get_cycles();
[610]338if( DEBUG_PROCESS_COPY_INIT < cycle )
339printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
340__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]341#endif
[407]342
[564]343// check user process
344assert( (local_process->pid != 0), "PID cannot be 0" );
345
[172]346    // reset local process vmm
[415]347    error = vmm_init( local_process );
[492]348    assert( (error == 0) , "cannot initialize VMM\n");
[1]349
[172]350    // reset process file descriptors array
[23]351        process_fd_init( local_process );
[1]352
[610]353    // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]354    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
355    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]356    local_process->cwd_xp      = XPTR_NULL;
[1]357
358    // reset children list root (not used in a process descriptor copy)
359    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]360    local_process->children_nr   = 0;
[564]361    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
362                           LOCK_PROCESS_CHILDREN );
[1]363
[428]364    // reset children_list (not used in a process descriptor copy)
365    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]366
367    // reset semaphores list root (not used in a process descriptor copy)
368    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]369    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
370    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
371    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]372
[564]373    // initialize th_tbl[] array and associated fields
[1]374    uint32_t i;
[564]375    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]376        {
377        local_process->th_tbl[i] = NULL;
378    }
379    local_process->th_nr  = 0;
[564]380    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]381
[564]382
[1]383    // register new process descriptor in local cluster manager local_list
384    cluster_process_local_link( local_process );
385
386    // register new process descriptor in owner cluster manager copies_list
387    cluster_process_copies_link( local_process );
388
[124]389        hal_fence();
[1]390
[438]391#if DEBUG_PROCESS_COPY_INIT
[433]392cycle = (uint32_t)hal_get_cycles();
[610]393if( DEBUG_PROCESS_COPY_INIT < cycle )
394printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
395__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]396#endif
[279]397
[1]398    return 0;
399
[204]400} // end process_copy_init()
401
[1]402///////////////////////////////////////////
403void process_destroy( process_t * process )
404{
[428]405    xptr_t      parent_xp;
406    process_t * parent_ptr;
407    cxy_t       parent_cxy;
408    xptr_t      children_lock_xp;
[446]409    xptr_t      children_nr_xp;
[1]410
[437]411    pid_t       pid = process->pid;
412
[593]413// check no more threads
[618]414assert( (process->th_nr == 0),
415"process %x in cluster %x contains threads", pid , local_cxy );
[428]416
[438]417#if DEBUG_PROCESS_DESTROY
[610]418thread_t * this = CURRENT_THREAD;
[433]419uint32_t cycle = (uint32_t)hal_get_cycles();
[610]420if( DEBUG_PROCESS_DESTROY < cycle )
421printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
422__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]423#endif
[428]424
[618]425    // Destroy VMM
426    vmm_destroy( process );
427
428#if (DEBUG_PROCESS_DESTROY & 1)
429if( DEBUG_PROCESS_DESTROY < cycle )
430printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
431__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
432#endif
433
[436]434    // remove process from local_list in local cluster manager
435    cluster_process_local_unlink( process );
[1]436
[618]437#if (DEBUG_PROCESS_DESTROY & 1)
438if( DEBUG_PROCESS_DESTROY < cycle )
439printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
440__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
441#endif
442
[436]443    // remove process from copies_list in owner cluster manager
444    cluster_process_copies_unlink( process );
[23]445
[618]446#if (DEBUG_PROCESS_DESTROY & 1)
447if( DEBUG_PROCESS_DESTROY < cycle )
448printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
449__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
450#endif
451
[450]452    // remove process from children_list
453    // and release PID if owner cluster
[437]454    if( CXY_FROM_PID( pid ) == local_cxy )
[428]455    {
456        // get pointers on parent process
457        parent_xp  = process->parent_xp;
458        parent_cxy = GET_CXY( parent_xp );
459        parent_ptr = GET_PTR( parent_xp );
460
461        // get extended pointer on children_lock in parent process
462        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
[446]463        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
[428]464
465        // remove process from children_list
[564]466        remote_queuelock_acquire( children_lock_xp );
[428]467        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
[446]468            hal_remote_atomic_add( children_nr_xp , -1 );
[564]469        remote_queuelock_release( children_lock_xp );
[450]470
[618]471#if (DEBUG_PROCESS_DESTROY & 1)
472if( DEBUG_PROCESS_DESTROY < cycle )
473printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from children list\n",
474__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
475#endif
476
[564]477        // release the process PID to cluster manager
478        cluster_pid_release( pid );
[428]479
[618]480#if (DEBUG_PROCESS_DESTROY & 1)
481if( DEBUG_PROCESS_DESTROY < cycle )
482printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
483__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
484#endif
[23]485
[618]486    }
[1]487
[618]488    // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG]
489    // FIXME close all open files [AG]
490    // FIXME synchronize dirty files [AG]
[1]491
[416]492    // release memory allocated to process descriptor
493    process_free( process );
[1]494
[438]495#if DEBUG_PROCESS_DESTROY
[433]496cycle = (uint32_t)hal_get_cycles();
[610]497if( DEBUG_PROCESS_DESTROY < cycle )
498printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
499__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]500#endif
[428]501
[407]502}  // end process_destroy()
503
[583]504///////////////////////////////////////////////////////////////////
[527]505const char * process_action_str( process_sigactions_t action_type )
[409]506{
[583]507    switch ( action_type )
508    {
509        case BLOCK_ALL_THREADS:   return "BLOCK";
510        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
511        case DELETE_ALL_THREADS:  return "DELETE";
512        default:                  return "undefined";
513    }
[409]514}
515
[435]516////////////////////////////////////////
517void process_sigaction( pid_t       pid,
[457]518                        uint32_t    type )
[409]519{
520    cxy_t              owner_cxy;         // owner cluster identifier
521    lpid_t             lpid;              // process index in owner cluster
522    cluster_t        * cluster;           // pointer on cluster manager
523    xptr_t             root_xp;           // extended pointer on root of copies
524    xptr_t             lock_xp;           // extended pointer on lock protecting copies
525    xptr_t             iter_xp;           // iterator on copies list
526    xptr_t             process_xp;        // extended pointer on process copy
527    cxy_t              process_cxy;       // process copy cluster identifier
[457]528    process_t        * process_ptr;       // local pointer on process copy
[436]529    reg_t              save_sr;           // for critical section
530    rpc_desc_t         rpc;               // shared RPC descriptor
[457]531    thread_t         * client;            // pointer on client thread
532    xptr_t             client_xp;         // extended pointer on client thread
533    process_t        * local;             // pointer on process copy in local cluster
534    uint32_t           remote_nr;         // number of remote process copies
[409]535
[457]536    client    = CURRENT_THREAD;
537    client_xp = XPTR( local_cxy , client );
538    local     = NULL;
539    remote_nr = 0;
[435]540
[583]541    // check calling thread can yield
542    thread_assert_can_yield( client , __FUNCTION__ );
[564]543
[438]544#if DEBUG_PROCESS_SIGACTION
[433]545uint32_t cycle = (uint32_t)hal_get_cycles();
[438]546if( DEBUG_PROCESS_SIGACTION < cycle )
[593]547printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
[583]548__FUNCTION__ , client->process->pid, client->trdid,
[457]549process_action_str( type ) , pid , cycle );
[433]550#endif
[409]551
[436]552    // get pointer on local cluster manager
[416]553    cluster = LOCAL_CLUSTER;
554
[409]555    // get owner cluster identifier and process lpid
[435]556    owner_cxy = CXY_FROM_PID( pid );
557    lpid      = LPID_FROM_PID( pid );
[409]558
[593]559    // get root of list of copies and lock from owner cluster
[436]560    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
561    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
[435]562
[583]563// check action type
564assert( ((type == DELETE_ALL_THREADS ) ||
565         (type == BLOCK_ALL_THREADS )  ||
566         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
[416]567             
[593]568    // This client thread sends parallel RPCs to all remote clusters containing
[564]569    // target process copies, waits for all responses, and then directly handles
570    // the threads in the local cluster, when required.
[457]571    // The client thread allocates a shared RPC descriptor on its stack,
572    // because all parallel, non-blocking server threads use the same input
573    // arguments and share the RPC response field.
[436]574
575    // mask IRQs
576    hal_disable_irq( &save_sr);
577
[457]578    // client thread blocks itself
579    thread_block( client_xp , THREAD_BLOCKED_RPC );
[436]580
581    // initialize shared RPC descriptor
[438]582    rpc.responses = 0;
583    rpc.blocking  = false;
584    rpc.index     = RPC_PROCESS_SIGACTION;
585    rpc.thread    = client;
586    rpc.lid       = client->core->lid;
[611]587    rpc.args[0]   = pid;
588    rpc.args[1]   = type;
[436]589
[611]590    // take the lock protecting process copies
591    remote_queuelock_acquire( lock_xp );
592
[457]593    // scan list of process copies
[409]594    XLIST_FOREACH( root_xp , iter_xp )
595    {
[457]596        // get extended pointers and cluster on process
[440]597        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
598        process_cxy = GET_CXY( process_xp );
[457]599        process_ptr = GET_PTR( process_xp );
[440]600
[593]601        if( process_cxy == local_cxy )    // process copy is local
[457]602        { 
603            local = process_ptr;
604        }
[593]605        else                              // process copy is remote
[457]606        {
607            // update number of remote process copies
608            remote_nr++;
609
610            // atomically increment responses counter
611            hal_atomic_add( (void *)&rpc.responses , 1 );
612
[438]613#if DEBUG_PROCESS_SIGACTION
614if( DEBUG_PROCESS_SIGACTION < cycle )
[593]615printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
[583]616__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
[433]617#endif
[457]618            // call RPC in target cluster
619            rpc_process_sigaction_client( process_cxy , &rpc );
620        }
621    }  // end list of copies
622
[409]623    // release the lock protecting process copies
[564]624    remote_queuelock_release( lock_xp );
[409]625
[436]626    // restore IRQs
627    hal_restore_irq( save_sr);
[409]628
[457]629    // - if there are remote process copies, the client thread deschedules
630    //   (it will be unblocked by the last RPC server thread).
631    // - if there are no remote copies, the client thread unblocks itself.
632    if( remote_nr )
633    {
634        sched_yield("blocked on rpc_process_sigaction");
635    } 
636    else
637    {
638        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
639    }
[409]640
[457]641    // handle the local process copy if required
642    if( local != NULL )
643    {
644
645#if DEBUG_PROCESS_SIGACTION
646if( DEBUG_PROCESS_SIGACTION < cycle )
[593]647printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
[583]648__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
[457]649#endif
650        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
[583]651        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
[457]652        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
653    }
654
[438]655#if DEBUG_PROCESS_SIGACTION
[433]656cycle = (uint32_t)hal_get_cycles();
[438]657if( DEBUG_PROCESS_SIGACTION < cycle )
[593]658printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
[583]659__FUNCTION__, client->process->pid, client->trdid,
[457]660process_action_str( type ), pid, cycle );
[433]661#endif
[416]662
[409]663}  // end process_sigaction()
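// Editor's note (not part of the original source): usage sketch. A caller only
// names the target process and the requested action; the fan-out to all clusters
// holding a copy is handled internally. The first line is exactly what
// process_make_exec() does later in this file; the second is a hypothetical
// example of the blocking action.
//
//     process_sigaction( pid , DELETE_ALL_THREADS );    // e.g. on exec()
//     process_sigaction( pid , BLOCK_ALL_THREADS );     // hypothetical: stop all threads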
664
[433]665/////////////////////////////////////////////////
[583]666void process_block_threads( process_t * process )
[1]667{
[409]668    thread_t          * target;         // pointer on target thread
[433]669    thread_t          * this;           // pointer on calling thread
[564]670    uint32_t            ltid;           // index in process th_tbl[]
[436]671    cxy_t               owner_cxy;      // target process owner cluster
[409]672    uint32_t            count;          // requests counter
[593]673    volatile uint32_t   ack_count;      // acknowledgements counter
[1]674
[416]675    // get calling thread pointer
[433]676    this = CURRENT_THREAD;
[407]677
[438]678#if DEBUG_PROCESS_SIGACTION
[564]679pid_t pid = process->pid;
[433]680uint32_t cycle = (uint32_t)hal_get_cycles();
[438]681if( DEBUG_PROCESS_SIGACTION < cycle )
[593]682printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]683__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]684#endif
[409]685
[564]686// check target process is a user process
[583]687assert( (LPID_FROM_PID( process->pid ) != 0 ), "target process must be a user process" );
[564]688
[610]689    // get target process owner cluster
[564]690    owner_cxy = CXY_FROM_PID( process->pid );
691
[409]692    // get lock protecting process th_tbl[]
[564]693    rwlock_rd_acquire( &process->th_lock );
[1]694
[440]695    // loop on target process local threads
[409]696    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[593]697    // - if the calling thread and the target thread are not running on the same
698    //   core, we ask the target scheduler to acknowledge the blocking,
699    //   to be sure that the target thread is not running.
700    // - if the calling thread and the target thread are running on the same core,
701    //   we don't need confirmation from the scheduler.
702           
[436]703    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]704    {
[409]705        target = process->th_tbl[ltid];
[1]706
[436]707        if( target != NULL )                                 // thread exists
[1]708        {
709            count++;
[409]710
[583]711            // set the global blocked bit in target thread descriptor.
712            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]713 
[583]714            if( this->core->lid != target->core->lid )
715            {
716                // increment responses counter
717                hal_atomic_add( (void*)&ack_count , 1 );
[409]718
[583]719                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
720                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]721
[583]722                // force scheduling on target thread
723                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]724            }
[1]725        }
[172]726    }
727
[428]728    // release lock protecting process th_tbl[]
[564]729    rwlock_rd_release( &process->th_lock );
[416]730
[593]731    // wait for the target schedulers' acknowledgements  TODO this could be improved...
[409]732    while( 1 )
733    {
[610]734        // exit when all scheduler acknowledgements have been received
[436]735        if ( ack_count == 0 ) break;
[409]736   
737        // wait 1000 cycles before retry
738        hal_fixed_delay( 1000 );
739    }
[1]740
[438]741#if DEBUG_PROCESS_SIGACTION
[433]742cycle = (uint32_t)hal_get_cycles();
[438]743if( DEBUG_PROCESS_SIGACTION < cycle )
[593]744printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
745__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]746#endif
[409]747
[428]748}  // end process_block_threads()
[409]749
[440]750/////////////////////////////////////////////////
751void process_delete_threads( process_t * process,
752                             xptr_t      client_xp )
[409]753{
[433]754    thread_t          * this;          // pointer on calling thread
[440]755    thread_t          * target;        // local pointer on target thread
756    xptr_t              target_xp;     // extended pointer on target thread
757    cxy_t               owner_cxy;     // owner process cluster
[409]758    uint32_t            ltid;          // index in process th_tbl
[440]759    uint32_t            count;         // threads counter
[409]760
[433]761    // get calling thread pointer
762    this = CURRENT_THREAD;
[409]763
[440]764    // get target process owner cluster
765    owner_cxy = CXY_FROM_PID( process->pid );
766
[438]767#if DEBUG_PROCESS_SIGACTION
[433]768uint32_t cycle = (uint32_t)hal_get_cycles();
[438]769if( DEBUG_PROCESS_SIGACTION < cycle )
[593]770printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
[583]771__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
[433]772#endif
773
[564]774// check target process is a user process
[593]775assert( (LPID_FROM_PID( process->pid ) != 0), "process %x is not a user process", process->pid );
[564]776
[409]777    // get lock protecting process th_tbl[]
[583]778    rwlock_wr_acquire( &process->th_lock );
[409]779
[440]780    // loop on target process local threads                       
[416]781    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]782    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]783    {
[409]784        target = process->th_tbl[ltid];
[1]785
[440]786        if( target != NULL )    // valid thread 
[1]787        {
[416]788            count++;
[440]789            target_xp = XPTR( local_cxy , target );
[1]790
[564]791            // main thread and client thread should not be deleted
[440]792            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
793                (client_xp) != target_xp )                           // not client thread
794            {
795                // mark target thread for delete and block it
796                thread_delete( target_xp , process->pid , false );   // not forced
797            }
[409]798        }
799    }
[1]800
[428]801    // release lock protecting process th_tbl[]
[583]802    rwlock_wr_release( &process->th_lock );
[407]803
[438]804#if DEBUG_PROCESS_SIGACTION
[433]805cycle = (uint32_t)hal_get_cycles();
[438]806if( DEBUG_PROCESS_SIGACTION < cycle )
[593]807printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
808__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]809#endif
[407]810
[440]811}  // end process_delete_threads()
[409]812
[440]813///////////////////////////////////////////////////
814void process_unblock_threads( process_t * process )
[409]815{
[440]816    thread_t          * target;        // pointer on target thread
817    thread_t          * this;          // pointer on calling thread
[409]818    uint32_t            ltid;          // index in process th_tbl
[440]819    uint32_t            count;         // requests counter
[409]820
[440]821    // get calling thread pointer
822    this = CURRENT_THREAD;
823
[438]824#if DEBUG_PROCESS_SIGACTION
[564]825pid_t pid = process->pid;
[433]826uint32_t cycle = (uint32_t)hal_get_cycles();
[438]827if( DEBUG_PROCESS_SIGACTION < cycle )
[593]828printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]829__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]830#endif
831
[564]832// check target process is a user process
833assert( ( process->pid != 0 ),
834"target process must be a user process" );
835
[416]836    // get lock protecting process th_tbl[]
[564]837    rwlock_rd_acquire( &process->th_lock );
[416]838
[440]839    // loop on process threads to unblock all threads
[416]840    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]841    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]842    {
[416]843        target = process->th_tbl[ltid];
[409]844
[440]845        if( target != NULL )             // thread found
[409]846        {
847            count++;
[440]848
849            // reset the global blocked bit in target thread descriptor.
850            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]851        }
852    }
853
[428]854    // release lock protecting process th_tbl[]
[564]855    rwlock_rd_release( &process->th_lock );
[407]856
[438]857#if DEBUG_PROCESS_SIGACTION
[433]858cycle = (uint32_t)hal_get_cycles();
[438]859if( DEBUG_PROCESS_SIGACTION < cycle )
[593]860printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]861__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]862#endif
[1]863
[440]864}  // end process_unblock_threads()
[407]865
[1]866///////////////////////////////////////////////
867process_t * process_get_local_copy( pid_t pid )
868{
869    error_t        error;
[172]870    process_t    * process_ptr;   // local pointer on process
[23]871    xptr_t         process_xp;    // extended pointer on process
[1]872
873    cluster_t * cluster = LOCAL_CLUSTER;
874
[564]875#if DEBUG_PROCESS_GET_LOCAL_COPY
876thread_t * this = CURRENT_THREAD;
877uint32_t cycle = (uint32_t)hal_get_cycles();
878if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]879printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]880__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]881#endif
882
[1]883    // get lock protecting local list of processes
[564]884    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]885
886    // scan the local list of process descriptors to find the process
[23]887    xptr_t  iter;
888    bool_t  found = false;
889    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]890    {
[23]891        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]892        process_ptr = GET_PTR( process_xp );
[23]893        if( process_ptr->pid == pid )
[1]894        {
895            found = true;
896            break;
897        }
898    }
899
900    // release lock protecting local list of processes
[564]901    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]902
[172]903    // allocate memory for a new local process descriptor
[440]904    // and initialise it from reference cluster if not found
[1]905    if( !found )
906    {
907        // get extended pointer on reference process descriptor
[23]908        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]909
[492]910        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]911
[1]912        // allocate memory for local process descriptor
[23]913        process_ptr = process_alloc();
[443]914
[23]915        if( process_ptr == NULL )  return NULL;
[1]916
917        // initialize local process descriptor copy
[23]918        error = process_copy_init( process_ptr , ref_xp );
[443]919
[1]920        if( error ) return NULL;
921    }
922
[440]923#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]924cycle = (uint32_t)hal_get_cycles();
[440]925if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]926printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]927__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]928#endif
929
[23]930    return process_ptr;
[1]931
[409]932}  // end process_get_local_copy()
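// Editor's note (not part of the original source): usage sketch. A thread running
// in a cluster that does not yet hold a copy of the target process only needs the
// PID; the local copy is created on demand from the reference cluster, using
// process_alloc() and process_copy_init() defined above.
//
//     process_t * local = process_get_local_copy( pid );
//     if( local == NULL ) return -1;    // no memory, or copy initialization failed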
933
[436]934////////////////////////////////////////////
935pid_t process_get_ppid( xptr_t  process_xp )
936{
937    cxy_t       process_cxy;
938    process_t * process_ptr;
939    xptr_t      parent_xp;
940    cxy_t       parent_cxy;
941    process_t * parent_ptr;
942
943    // get process cluster and local pointer
944    process_cxy = GET_CXY( process_xp );
945    process_ptr = GET_PTR( process_xp );
946
947    // get pointers on parent process
[564]948    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]949    parent_cxy = GET_CXY( parent_xp );
950    parent_ptr = GET_PTR( parent_xp );
951
[564]952    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]953}
954
[1]955//////////////////////////////////////////////////////////////////////////////////////////
956// File descriptor array related functions
957//////////////////////////////////////////////////////////////////////////////////////////
958
959///////////////////////////////////////////
960void process_fd_init( process_t * process )
961{
962    uint32_t fd;
963
[610]964    // initialize lock
[564]965    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]966
[610]967    // initialize number of open files
[23]968    process->fd_array.current = 0;
969
[1]970    // initialize array
[23]971    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]972    {
973        process->fd_array.array[fd] = XPTR_NULL;
974    }
975}
[610]976////////////////////////////////////////////////////
977error_t process_fd_register( xptr_t      process_xp,
[407]978                             xptr_t      file_xp,
979                             uint32_t  * fdid )
[1]980{
981    bool_t    found;
[23]982    uint32_t  id;
983    xptr_t    xp;
[1]984
[23]985    // get reference process cluster and local pointer
[610]986    process_t * process_ptr = GET_PTR( process_xp );
987    cxy_t       process_cxy = GET_CXY( process_xp );
[23]988
[610]989// check client process is reference process
990assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
991"client process must be reference process\n" );
992
993#if DEBUG_PROCESS_FD_REGISTER
994thread_t * this  = CURRENT_THREAD;
995uint32_t   cycle = (uint32_t)hal_get_cycles();
996pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
997if( DEBUG_PROCESS_FD_REGISTER < cycle )
998printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
999__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1000#endif
1001
1002    // build extended pointer on lock protecting reference fd_array
1003    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1004
[23]1005    // take lock protecting reference fd_array
[610]1006        remote_queuelock_acquire( lock_xp );
[23]1007
[1]1008    found   = false;
1009
[23]1010    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1011    {
[610]1012        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]1013        if ( xp == XPTR_NULL )
[1]1014        {
[564]1015            // update reference fd_array
[610]1016            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1017                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]1018
1019            // exit
1020                        *fdid = id;
[1]1021            found = true;
1022            break;
1023        }
1024    }
1025
[610]1026    // release lock protecting fd_array
1027        remote_queuelock_release( lock_xp );
[1]1028
[610]1029#if DEBUG_PROCESS_FD_REGISTER
1030cycle = (uint32_t)hal_get_cycles();
1031if( DEBUG_PROCESS_FD_REGISTER < cycle )
1032printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1033__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1034#endif
1035
[428]1036    if ( !found ) return -1;
[1]1037    else          return 0;
1038
[610]1039}  // end process_fd_register()
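// Editor's note (not part of the original source): usage sketch, assuming the
// caller already holds an extended pointer on an open file (file_xp) and on the
// reference process (ref_xp), as the vfs_open() calls earlier in this file do.
//
//     uint32_t fdid;
//     if( process_fd_register( ref_xp , file_xp , &fdid ) )
//     {
//         return -1;    // fd_array is full : no free slot found
//     }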
1040
[172]1041////////////////////////////////////////////////
[23]1042xptr_t process_fd_get_xptr( process_t * process,
[407]1043                            uint32_t    fdid )
[1]1044{
[23]1045    xptr_t  file_xp;
[564]1046    xptr_t  lock_xp;
[1]1047
[23]1048    // access local copy of process descriptor
[407]1049    file_xp = process->fd_array.array[fdid];
[1]1050
[23]1051    if( file_xp == XPTR_NULL )
1052    {
1053        // get reference process cluster and local pointer
1054        xptr_t      ref_xp  = process->ref_xp;
1055        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1056        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1057
[564]1058        // build extended pointer on lock protecting reference fd_array
1059        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1060
1061        // take lock protecting reference fd_array
1062            remote_queuelock_acquire( lock_xp );
1063
[23]1064        // access reference process descriptor
[564]1065        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1066
[23]1067        // update local fd_array if found
[564]1068        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1069       
1070        // release lock protecting reference fd_array
1071            remote_queuelock_release( lock_xp );
[23]1072    }
[1]1073
[23]1074    return file_xp;
[1]1075
[407]1076}  // end process_fd_get_xptr()
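// Editor's note (not part of the original source): usage sketch for a syscall
// that must translate a file descriptor index into an extended pointer; the lazy
// update of the local fd_array copy is handled inside the function itself.
//
//     xptr_t file_xp = process_fd_get_xptr( process , fdid );
//     if( file_xp == XPTR_NULL ) return -1;    // fdid not registered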
1077
[1]1078///////////////////////////////////////////
1079void process_fd_remote_copy( xptr_t dst_xp,
1080                             xptr_t src_xp )
1081{
1082    uint32_t fd;
1083    xptr_t   entry;
1084
1085    // get cluster and local pointer for src fd_array
1086    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1087    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1088
1089    // get cluster and local pointer for dst fd_array
1090    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1091    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1092
1093    // get the remote lock protecting the src fd_array
[564]1094        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1095
[428]1096    // loop on all fd_array entries
1097    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1098        {
[564]1099                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1100
1101                if( entry != XPTR_NULL )
1102                {
[459]1103            // increment file descriptor refcount
[1]1104            vfs_file_count_up( entry );
1105
1106                        // copy entry in destination process fd_array
[564]1107                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1108                }
1109        }
1110
1111    // release lock on source process fd_array
[564]1112        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1113
[407]1114}  // end process_fd_remote_copy()
1115
[564]1116
1117////////////////////////////////////
1118bool_t process_fd_array_full( void )
1119{
1120    // get extended pointer on reference process
1121    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1122
1123    // get reference process cluster and local pointer
1124    process_t * ref_ptr = GET_PTR( ref_xp );
1125    cxy_t       ref_cxy = GET_CXY( ref_xp );
1126
1127    // get number of open file descriptors from reference fd_array
1128    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1129
1130        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1131}
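// Editor's note (not part of the original source): usage sketch; a caller can
// test the reference fd_array before trying to register a new open file.
//
//     if( process_fd_array_full() ) return -1;    // too many open files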
1132
1133
[1]1134////////////////////////////////////////////////////////////////////////////////////
1135//  Thread related functions
1136////////////////////////////////////////////////////////////////////////////////////
1137
1138/////////////////////////////////////////////////////
1139error_t process_register_thread( process_t * process,
1140                                 thread_t  * thread,
1141                                 trdid_t   * trdid )
1142{
[472]1143    ltid_t         ltid;
1144    bool_t         found = false;
1145 
[564]1146// check arguments
1147assert( (process != NULL) , "process argument is NULL" );
1148assert( (thread != NULL) , "thread argument is NULL" );
[1]1149
[564]1150    // get the lock protecting th_tbl[] for all threads
1151    // except the idle thread executing kernel_init (which cannot yield)
1152    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1153
[583]1154    // scan th_tbl
[564]1155    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1156    {
1157        if( process->th_tbl[ltid] == NULL )
1158        {
1159            found = true;
1160            break;
1161        }
1162    }
1163
1164    if( found )
1165    {
1166        // register thread in th_tbl[]
1167        process->th_tbl[ltid] = thread;
1168        process->th_nr++;
1169
1170        // returns trdid
1171        *trdid = TRDID( local_cxy , ltid );
1172    }
1173
[583]1174    // release the lock protecting th_tbl
[564]1175    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1176
[564]1177    return (found) ? 0 : 0xFFFFFFFF;
[204]1178
1179}  // end process_register_thread()
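// Editor's note (not part of the original source): usage sketch for thread
// creation: register the new thread in the local process descriptor and get its
// trdid; a non-zero return value means th_tbl[] is full. Storing the returned
// trdid in the thread descriptor is assumed to be the caller's job.
//
//     trdid_t trdid;
//     if( process_register_thread( process , thread , &trdid ) )
//     {
//         return -1;    // no free slot in th_tbl[]
//     }
//     thread->trdid = trdid;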
1180
[443]1181/////////////////////////////////////////////////
1182bool_t process_remove_thread( thread_t * thread )
[1]1183{
[443]1184    uint32_t count;  // number of threads in local process descriptor
1185
[1]1186    process_t * process = thread->process;
1187
1188    // get thread local index
1189    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1190   
1191    // get the lock protecting th_tbl[]
1192    rwlock_wr_acquire( &process->th_lock );
[428]1193
[583]1194    // get number of threads
[443]1195    count = process->th_nr;
[428]1196
[583]1197// check thread
1198assert( (thread != NULL) , "thread argument is NULL" );
1199
[564]1200// check th_nr value
[583]1201assert( (count > 0) , "process th_nr cannot be 0\n" );
[443]1202
[1]1203    // remove thread from th_tbl[]
1204    process->th_tbl[ltid] = NULL;
[450]1205    process->th_nr = count-1;
[1]1206
[583]1207    // release lock protecting th_tbl
[564]1208    rwlock_wr_release( &process->th_lock );
[428]1209
[443]1210    return (count == 1);
1211
[450]1212}  // end process_remove_thread()
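// Editor's note (not part of the original source): usage sketch; the boolean
// return value tells the caller whether it just removed the last thread of this
// process in the local cluster, in which case the local descriptor itself can be
// destroyed with process_destroy() defined above.
//
//     if( process_remove_thread( thread ) )    // last local thread
//     {
//         process_destroy( process );
//     }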
[204]1213
[408]1214/////////////////////////////////////////////////////////
1215error_t process_make_fork( xptr_t      parent_process_xp,
1216                           xptr_t      parent_thread_xp,
1217                           pid_t     * child_pid,
1218                           thread_t ** child_thread )
[1]1219{
[408]1220    process_t * process;         // local pointer on child process descriptor
1221    thread_t  * thread;          // local pointer on child thread descriptor
1222    pid_t       new_pid;         // process identifier for child process
1223    pid_t       parent_pid;      // process identifier for parent process
1224    xptr_t      ref_xp;          // extended pointer on reference process
[428]1225    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1226    error_t     error;
[1]1227
[408]1228    // get cluster and local pointer for parent process
1229    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1230    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1231
[428]1232    // get parent process PID and extended pointer on .elf file
[564]1233    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1234    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1235
[564]1236    // get extended pointer on reference process
1237    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1238
[564]1239// check parent process is the reference process
1240assert( (parent_process_xp == ref_xp ) ,
1241"parent process must be the reference process\n" );
[407]1242
[438]1243#if DEBUG_PROCESS_MAKE_FORK
[583]1244uint32_t cycle   = (uint32_t)hal_get_cycles();
1245thread_t * this  = CURRENT_THREAD;
1246trdid_t    trdid = this->trdid;
1247pid_t      pid   = this->process->pid;
[438]1248if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1249printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1250__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1251#endif
[172]1252
[408]1253    // allocate a process descriptor
1254    process = process_alloc();
1255    if( process == NULL )
1256    {
1257        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1258        __FUNCTION__, local_cxy ); 
1259        return -1;
1260    }
[1]1261
[408]1262    // allocate a child PID from local cluster
[416]1263    error = cluster_pid_alloc( process , &new_pid );
[428]1264    if( error ) 
[1]1265    {
[408]1266        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1267        __FUNCTION__, local_cxy ); 
1268        process_free( process );
1269        return -1;
[1]1270    }
[408]1271
[469]1272#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1273cycle = (uint32_t)hal_get_cycles();
1274if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1275printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
[583]1276__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1277#endif
1278
[408]1279    // initializes child process descriptor from parent process descriptor
1280    process_reference_init( process,
1281                            new_pid,
1282                            parent_process_xp );
1283
[438]1284#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1285cycle = (uint32_t)hal_get_cycles();
[438]1286if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1287printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1288__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1289#endif
[408]1290
[457]1291
[408]1292    // copy VMM from parent descriptor to child descriptor
1293    error = vmm_fork_copy( process,
1294                           parent_process_xp );
1295    if( error )
[101]1296    {
[408]1297        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1298        __FUNCTION__, local_cxy ); 
1299        process_free( process );
1300        cluster_pid_release( new_pid );
1301        return -1;
[101]1302    }
[172]1303
[438]1304#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1305cycle = (uint32_t)hal_get_cycles();
[438]1306if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1307printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1308__FUNCTION__, pid, trdid, cycle );
[433]1309#endif
[407]1310
[564]1311    // if parent_process is INIT, or if parent_process is the TXT owner,
1312    // the child_process becomes the owner of its TXT terminal
1313    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1314    {
1315        process_txt_set_ownership( XPTR( local_cxy , process ) );
1316
1317#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1318cycle = (uint32_t)hal_get_cycles();
1319if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1320printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1321__FUNCTION__ , pid, trdid, cycle );
[457]1322#endif
1323
1324    }
1325
[428]1326    // update extended pointer on .elf file
1327    process->vfs_bin_xp = vfs_bin_xp;
1328
[408]1329    // create child thread descriptor from parent thread descriptor
1330    error = thread_user_fork( parent_thread_xp,
1331                              process,
1332                              &thread );
1333    if( error )
1334    {
1335        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1336        __FUNCTION__, local_cxy ); 
1337        process_free( process );
1338        cluster_pid_release( new_pid );
1339        return -1;
1340    }
[172]1341
[564]1342// check main thread LTID
1343assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1344"main thread must have LTID == 0\n" );
[428]1345
[564]1346#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1347cycle = (uint32_t)hal_get_cycles();
[438]1348if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1349printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1350__FUNCTION__, pid, trdid, thread, cycle );
[433]1351#endif
[1]1352
[433]1353    // set Copy_On_Write flag in parent process GPT
[408]1354    // this includes all replicated GPT copies
1355    if( parent_process_cxy == local_cxy )   // reference is local
1356    {
1357        vmm_set_cow( parent_process_ptr );
1358    }
1359    else                                    // reference is remote
1360    {
1361        rpc_vmm_set_cow_client( parent_process_cxy,
1362                                parent_process_ptr );
1363    }
[1]1364
[433]1365    // set Copy_On_Write flag in child process GPT
1366    vmm_set_cow( process );
1367 
[438]1368#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1369cycle = (uint32_t)hal_get_cycles();
[438]1370if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1371printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1372__FUNCTION__, pid, trdid, cycle );
[433]1373#endif
[101]1374
[428]1375    // get extended pointers on parent children_root, children_lock and children_nr
1376    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1377    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1378    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1379
[428]1380    // register process in parent children list
[564]1381    remote_queuelock_acquire( children_lock_xp );
[428]1382        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1383        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1384    remote_queuelock_release( children_lock_xp );
[204]1385
[408]1386    // return success
1387    *child_thread = thread;
1388    *child_pid    = new_pid;
[1]1389
[438]1390#if DEBUG_PROCESS_MAKE_FORK
[433]1391cycle = (uint32_t)hal_get_cycles();
[438]1392if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1393printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1394__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1395#endif
[428]1396
[408]1397    return 0;
1398
[416]1399}   // end process_make_fork()
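
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): it shows how a fork system call handler could
// invoke process_make_fork(), assuming the argument order suggested by the function
// body above (parent process xptr, parent thread xptr, then the two result pointers).
// The caller name and local variables are hypothetical.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t example_fork_caller( void )
{
    thread_t  * parent_thread  = CURRENT_THREAD;
    process_t * parent_process = parent_thread->process;

    pid_t       child_pid;              // returned child process PID
    thread_t  * child_thread;           // returned child main thread

    // build extended pointers on parent process and parent thread
    xptr_t parent_process_xp = XPTR( local_cxy , parent_process );
    xptr_t parent_thread_xp  = XPTR( local_cxy , parent_thread );

    // create the child process and its main thread in the local cluster
    error_t error = process_make_fork( parent_process_xp,
                                       parent_thread_xp,
                                       &child_pid,
                                       &child_thread );
    if( error ) return -1;

    // unblock the child main thread to let it run
    thread_unblock( XPTR( local_cxy , child_thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}
#endif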
[408]1400
1401/////////////////////////////////////////////////////
1402error_t process_make_exec( exec_info_t  * exec_info )
1403{
[457]1404    thread_t       * thread;                  // local pointer on this thread
1405    process_t      * process;                 // local pointer on this process
1406    pid_t            pid;                     // this process identifier
[610]1407    xptr_t           ref_xp;                  // extended pointer on reference process
[441]1408        error_t          error;                   // value returned by called functions
[457]1409    char           * path;                    // path to .elf file
1410    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1411    uint32_t         file_id;                 // file index in fd_array
1412    uint32_t         args_nr;                 // number of main thread arguments
1413    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1414
[610]1415    // get thread, process, pid and ref_xp
[457]1416    thread  = CURRENT_THREAD;
1417    process = thread->process;
1418    pid     = process->pid;
[610]1419    ref_xp  = process->ref_xp;
[408]1420
[457]1421        // get relevant infos from exec_info
1422        path          = exec_info->path;
1423    args_nr       = exec_info->args_nr;
1424    args_pointers = exec_info->args_pointers;
[408]1425
[438]1426#if DEBUG_PROCESS_MAKE_EXEC
[433]1427uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1428if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1429printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1430__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1431#endif
[408]1432
[457]1433    // open the file identified by <path>
1434    file_xp = XPTR_NULL;
[564]1435    file_id = 0xFFFFFFFF;
[610]1436        error   = vfs_open( process->vfs_root_xp,
[457]1437                            path,
[610]1438                        ref_xp,
[457]1439                            O_RDONLY,
1440                            0,
1441                            &file_xp,
1442                            &file_id );
1443        if( error )
1444        {
1445                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1446                return -1;
1447        }
1448
[446]1449#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1450cycle = (uint32_t)hal_get_cycles();
[446]1451if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1452printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1453__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1454#endif
1455
[457]1456    // delete all threads other than this main thread in all clusters
1457    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1458
[469]1459#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1460cycle = (uint32_t)hal_get_cycles();
1461if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1462printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
[583]1463__FUNCTION__, pid, thread->trdid, cycle );
[469]1464#endif
1465
[457]1466    // reset local process VMM
1467    vmm_destroy( process );
[446]1468
[457]1469#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1470cycle = (uint32_t)hal_get_cycles();
1471if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1472printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
[583]1473__FUNCTION__, pid, thread->trdid, cycle );
[457]1474#endif
[408]1475
[457]1476    // re-initialize the VMM (kentry/args/envs vsegs registration)
1477    error = vmm_init( process );
1478    if( error )
[416]1479    {
[457]1480        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1481        vfs_close( file_xp , file_id );
1482        // FIXME restore old process VMM
[416]1483        return -1;
1484    }
[457]1485   
[438]1486#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1487cycle = (uint32_t)hal_get_cycles();
[438]1488if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1489printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
[583]1490__FUNCTION__, pid, thread->trdid, cycle );
[433]1491#endif
[428]1492
[457]1493    // register code & data vsegs as well as entry-point in process VMM,
[428]1494    // and register extended pointer on .elf file in process descriptor
[457]1495        error = elf_load_process( file_xp , process );
[441]1496    if( error )
[1]1497        {
[441]1498                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1499        vfs_close( file_xp , file_id );
1500        // FIXME restore old process VMM
[408]1501        return -1;
[1]1502        }
1503
[438]1504#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1505cycle = (uint32_t)hal_get_cycles();
[438]1506if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1507printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
[583]1508__FUNCTION__, pid, thread->trdid, cycle );
[433]1509#endif
[1]1510
[457]1511    // update the existing main thread descriptor... and jump to user code
1512    error = thread_user_exec( (void *)process->vmm.entry_point,
1513                              args_nr,
1514                              args_pointers );
1515    if( error )
1516    {
[469]1517        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1518        vfs_close( file_xp , file_id );
1519        // FIXME restore old process VMM
[408]1520        return -1;
[457]1521    }
[1]1522
[492]1523    assert( false, "we should not execute this code");
[457]1524 
[409]1525        return 0;
1526
1527}  // end process_make_exec()
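
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): a caller of process_make_exec() is expected to
// fill an exec_info_t structure before the call. Only the three fields actually read
// above (path, args_nr, args_pointers) are shown; the structure may contain other
// fields, the argv values are hypothetical, and the sketch assumes the path field can
// receive a string copy via strcpy().
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static error_t example_exec_caller( void )
{
    exec_info_t   exec_info;
    static char * argv[] = { "/bin/user/hello.elf" , NULL };

    strcpy( exec_info.path , argv[0] );    // path to the new .elf file
    exec_info.args_nr       = 1;           // number of main thread arguments
    exec_info.args_pointers = argv;        // array of pointers on arguments

    // replace the calling process code, data and main thread
    return process_make_exec( &exec_info );
}
#endif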
1528
[457]1529
[428]1530///////////////////////////////////////////////
1531void process_zero_create( process_t * process )
1532{
[580]1533    error_t error;
1534    pid_t   pid;
[428]1535
[438]1536#if DEBUG_PROCESS_ZERO_CREATE
[433]1537uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1538if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1539printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1540__FUNCTION__, local_cxy, cycle );
[433]1541#endif
[428]1542
[580]1543    // get PID from local cluster manager for this kernel process
1544    error = cluster_pid_alloc( process , &pid );
1545
1546    if( error || (LPID_FROM_PID( pid ) != 0) )
1547    {
1548        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1549        __FUNCTION__ , local_cxy, pid );
1550        hal_core_sleep();
1551    }
1552
[428]1553    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1554    // the kernel process_zero is its own parent_process,
1555    // reference_process, and owner_process, and cannot be killed...
1556    process->pid        = pid;
[433]1557    process->ref_xp     = XPTR( local_cxy , process );
[443]1558    process->owner_xp   = XPTR( local_cxy , process );
[580]1559    process->parent_xp  = XPTR( local_cxy , process );
[433]1560    process->term_state = 0;
[428]1561
[564]1562    // reset th_tbl[] array and associated fields
[428]1563    uint32_t i;
[564]1564    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1565        {
1566        process->th_tbl[i] = NULL;
1567    }
1568    process->th_nr  = 0;
[564]1569    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1570
[564]1571
[428]1572    // reset children list as empty
1573    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1574    process->children_nr = 0;
[564]1575    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1576                           LOCK_PROCESS_CHILDREN );
[428]1577
[580]1578    // register kernel process in cluster manager local_list
1579    cluster_process_local_link( process );
1580   
[428]1581        hal_fence();
1582
[438]1583#if DEBUG_PROCESS_ZERO_CREATE
[433]1584cycle = (uint32_t)hal_get_cycles();
[438]1585if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1586printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1587__FUNCTION__, local_cxy, cycle );
[433]1588#endif
[428]1589
[610]1590}  // end process_zero_create()
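
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): the LPID check above relies on the PID encoding
// used throughout this file, where a PID packs the owner cluster identifier and a
// local index. Only the two macros already used in this file are assumed; the helper
// function itself is hypothetical.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_pid_decode( pid_t pid )
{
    cxy_t  owner_cxy = CXY_FROM_PID( pid );     // cluster that allocated this PID
    lpid_t lpid      = LPID_FROM_PID( pid );    // local index in that cluster

    // the kernel process in each cluster always has lpid == 0
    printk("PID %x => owner cluster %x / local index %d\n", pid, owner_cxy, lpid );
}
#endif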
[428]1591
[564]1592////////////////////////////////
[485]1593void process_init_create( void )
[1]1594{
[428]1595    process_t      * process;       // local pointer on process descriptor
[409]1596    pid_t            pid;           // process_init identifier
1597    thread_t       * thread;        // local pointer on main thread
1598    pthread_attr_t   attr;          // main thread attributes
1599    lid_t            lid;           // selected core local index for main thread
[457]1600    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1601    uint32_t         file_id;       // file index in fd_array
[409]1602    error_t          error;
[1]1603
[438]1604#if DEBUG_PROCESS_INIT_CREATE
[610]1605thread_t * this = CURRENT_THREAD;
[433]1606uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1607if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1608printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1609__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1610#endif
[1]1611
[408]1612    // allocate memory for process descriptor from local cluster
1613        process = process_alloc(); 
[457]1614       
[564]1615// check memory allocator
1616assert( (process != NULL),
1617"no memory for process descriptor in cluster %x\n", local_cxy  );
[101]1618
[610]1619    // set the CWD and VFS_ROOT fields in process descriptor
1620    process->cwd_xp      = process_zero.vfs_root_xp;
1621    process->vfs_root_xp = process_zero.vfs_root_xp;
1622
[409]1623    // get PID from local cluster
[416]1624    error = cluster_pid_alloc( process , &pid );
[408]1625
[564]1626// check PID allocator
1627assert( (error == 0),
1628"cannot allocate PID in cluster %x\n", local_cxy );
[409]1629
[564]1630// check PID value
1631assert( (pid == 1) ,
1632"process INIT must be first process in cluster 0\n" );
[457]1633
[409]1634    // initialize process descriptor / parent is local process_zero
1635    process_reference_init( process,
[408]1636                            pid,
[457]1637                            XPTR( local_cxy , &process_zero ) ); 
[408]1638
[564]1639#if(DEBUG_PROCESS_INIT_CREATE & 1)
1640if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1641printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1642__FUNCTION__, this->process->pid, this->trdid );
[564]1643#endif
1644
[457]1645    // open the file identified by CONFIG_PROCESS_INIT_PATH
1646    file_xp = XPTR_NULL;
1647    file_id = -1;
[610]1648        error   = vfs_open( process->vfs_root_xp,
[457]1649                            CONFIG_PROCESS_INIT_PATH,
[610]1650                        XPTR( local_cxy , process ),
[457]1651                            O_RDONLY,
1652                            0,
1653                            &file_xp,
1654                            &file_id );
1655
[564]1656assert( (error == 0),
1657"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1658
[564]1659#if(DEBUG_PROCESS_INIT_CREATE & 1)
1660if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1661printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
1662__FUNCTION__, this->process->pid, this->trdid );
[564]1663#endif
1664
1665    // register "code" and "data" vsegs as well as entry-point
[409]1666    // in process VMM, using information contained in the elf file.
[457]1667        error = elf_load_process( file_xp , process );
[101]1668
[564]1669assert( (error == 0),
1670"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1671
[564]1672#if(DEBUG_PROCESS_INIT_CREATE & 1)
1673if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1674printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1675__FUNCTION__, this->process->pid, this->trdid );
[564]1676#endif
1677
[428]1678    // get extended pointers on process_zero children_root, children_lock
1679    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1680    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1681
[564]1682    // take lock protecting kernel process children list
1683    remote_queuelock_acquire( children_lock_xp );
1684
[428]1685    // register process INIT in parent local process_zero
1686        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1687        hal_atomic_add( &process_zero.children_nr , 1 );
1688
[564]1689    // release lock protecting kernel process children list
1690    remote_queuelock_release( children_lock_xp );
1691
1692#if(DEBUG_PROCESS_INIT_CREATE & 1)
1693if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1694printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1695__FUNCTION__, this->process->pid, this->trdid );
[564]1696#endif
1697
[409]1698    // select a core in local cluster to execute the main thread
1699    lid  = cluster_select_local_core();
1700
1701    // initialize pthread attributes for main thread
1702    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1703    attr.cxy        = local_cxy;
1704    attr.lid        = lid;
1705
1706    // create and initialize thread descriptor
1707        error = thread_user_create( pid,
1708                                (void *)process->vmm.entry_point,
1709                                NULL,
1710                                &attr,
1711                                &thread );
[1]1712
[564]1713assert( (error == 0),
1714"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[428]1715
[564]1716assert( (thread->trdid == 0),
1717"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1718
[564]1719#if(DEBUG_PROCESS_INIT_CREATE & 1)
1720if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1721printk("\n[%s] thread[%x,%x] created main thread\n",
1722__FUNCTION__, this->process->pid, this->trdid );
[564]1723#endif
1724
[409]1725    // activate thread
1726        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1727
[124]1728    hal_fence();
[1]1729
[438]1730#if DEBUG_PROCESS_INIT_CREATE
[433]1731cycle = (uint32_t)hal_get_cycles();
[438]1732if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1733printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1734__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1735#endif
[409]1736
[204]1737}  // end process_init_create()
1738
[428]1739/////////////////////////////////////////
1740void process_display( xptr_t process_xp )
1741{
1742    process_t   * process_ptr;
1743    cxy_t         process_cxy;
[443]1744
[428]1745    xptr_t        parent_xp;       // extended pointer on parent process
1746    process_t   * parent_ptr;
1747    cxy_t         parent_cxy;
1748
[443]1749    xptr_t        owner_xp;        // extended pointer on owner process
1750    process_t   * owner_ptr;
1751    cxy_t         owner_cxy;
1752
[428]1753    pid_t         pid;
1754    pid_t         ppid;
[580]1755    lpid_t        lpid;
[428]1756    uint32_t      state;
1757    uint32_t      th_nr;
1758
[443]1759    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1760    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1761    chdev_t     * txt_chdev_ptr;
1762    cxy_t         txt_chdev_cxy;
1763    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1764
1765    xptr_t        elf_file_xp;     // extended pointer on .elf file
1766    cxy_t         elf_file_cxy;
1767    vfs_file_t  * elf_file_ptr;
1768    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1769
1770    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1771    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1772
1773    // get cluster and local pointer on process
1774    process_ptr = GET_PTR( process_xp );
1775    process_cxy = GET_CXY( process_xp );
1776
[580]1777    // get process PID, LPID, and state
[564]1778    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1779    lpid  = LPID_FROM_PID( pid );
[564]1780    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1781
[580]1782    // get process PPID
[564]1783    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1784    parent_cxy = GET_CXY( parent_xp );
1785    parent_ptr = GET_PTR( parent_xp );
[564]1786    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1787
1788    // get number of threads
[564]1789    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1790
[443]1791    // get pointers on owner process descriptor
[564]1792    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1793    owner_cxy = GET_CXY( owner_xp );
1794    owner_ptr = GET_PTR( owner_xp );
[428]1795
[580]1796    // get process TXT name and .elf name
1797    if( lpid )                                   // user process
1798    {
[443]1799
[580]1800        // get extended pointer on file descriptor associated to TXT_RX
1801        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1802
[580]1803        assert( (txt_file_xp != XPTR_NULL) ,
1804        "process must be attached to one TXT terminal\n" ); 
[443]1805
[580]1806        // get TXT_RX chdev pointers
1807        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1808        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1809        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1810
1811        // get TXT_RX name and ownership
1812        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1813                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1814   
[580]1815        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1816                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]1817
[580]1818        // get process .elf name
1819        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1820        elf_file_cxy  = GET_CXY( elf_file_xp );
1821        elf_file_ptr  = GET_PTR( elf_file_xp );
1822        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1823        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1824    }
1825    else                                         // kernel process_zero
1826    {
1827        // TXT name and .elf name are not registered in kernel process_zero
1828        strcpy( txt_name , "txt0_rx" );
1829        txt_owner_xp = process_xp; 
1830        strcpy( elf_name , "kernel.elf" );
1831    }
1832
[428]1833    // display process info
[443]1834    if( txt_owner_xp == process_xp )
[428]1835    {
[581]1836        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
1837        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1838    }
1839    else
1840    {
[581]1841        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
1842        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1843    }
1844}  // end process_display()
1845
1846
1847////////////////////////////////////////////////////////////////////////////////////////
1848//     Terminals related functions
1849////////////////////////////////////////////////////////////////////////////////////////
1850
[581]1851//////////////////////////////////
[485]1852uint32_t process_txt_alloc( void )
[428]1853{
1854    uint32_t  index;       // TXT terminal index
1855    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1856    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1857    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1858    xptr_t    root_xp;     // extended pointer on owner field in chdev
1859
1860    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1861    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1862    {
1863        // get pointers on TXT_RX[index]
1864        chdev_xp  = chdev_dir.txt_rx[index];
1865        chdev_cxy = GET_CXY( chdev_xp );
1866        chdev_ptr = GET_PTR( chdev_xp );
1867
1868        // get extended pointer on root of attached process
1869        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1870
1871        // return free TXT index if found
1872        if( xlist_is_empty( root_xp ) ) return index; 
1873    }
1874
[492]1875    assert( false , "no free TXT terminal found" );
[428]1876
1877    return -1;
1878
1879} // end process_txt_alloc()
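
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): a new user process can be bound to a terminal by
// combining process_txt_alloc(), process_txt_attach() and process_txt_set_ownership(),
// as shown below. It assumes the caller runs in the process owner cluster, as required
// by the asserts in these functions; the helper function itself is hypothetical.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_txt_binding( process_t * process )
{
    // get a free TXT terminal index (TXT0 is reserved for the kernel)
    uint32_t txt_id = process_txt_alloc();

    // register the process in the list of processes attached to this terminal
    process_txt_attach( process , txt_id );

    // optionally make it the foreground (owner) process for this terminal
    process_txt_set_ownership( XPTR( local_cxy , process ) );
}
#endif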
1880
1881/////////////////////////////////////////////
1882void process_txt_attach( process_t * process,
1883                         uint32_t    txt_id )
1884{
1885    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1886    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1887    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1888    xptr_t      root_xp;      // extended pointer on list root in chdev
1889    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1890
[564]1891// check process is in owner cluster
1892assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
1893"process descriptor not in owner cluster" );
[428]1894
[564]1895// check terminal index
1896assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1897"illegal TXT terminal index" );
[428]1898
1899    // get pointers on TXT_RX[txt_id] chdev
1900    chdev_xp  = chdev_dir.txt_rx[txt_id];
1901    chdev_cxy = GET_CXY( chdev_xp );
1902    chdev_ptr = GET_PTR( chdev_xp );
1903
1904    // get extended pointer on root & lock of attached process list
1905    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1906    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1907
[564]1908    // get lock protecting list of processes attached to TXT
1909    remote_busylock_acquire( lock_xp );
1910
[428]1911    // insert process in attached process list
1912    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1913
[564]1914    // release lock protecting list of processes attached to TXT
1915    remote_busylock_release( lock_xp );
1916
[446]1917#if DEBUG_PROCESS_TXT
[610]1918thread_t * this = CURRENT_THREAD;
[457]1919uint32_t cycle = (uint32_t)hal_get_cycles();
[446]1920if( DEBUG_PROCESS_TXT < cycle )
[610]1921printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
1922__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]1923#endif
[428]1924
1925} // end process_txt_attach()
1926
[436]1927/////////////////////////////////////////////
1928void process_txt_detach( xptr_t  process_xp )
[428]1929{
[436]1930    process_t * process_ptr;  // local pointer on process in owner cluster
1931    cxy_t       process_cxy;  // process owner cluster
1932    pid_t       process_pid;  // process identifier
1933    xptr_t      file_xp;      // extended pointer on stdin file
[428]1934    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1935    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1936    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1937    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1938
[436]1939    // get process cluster, local pointer, and PID
1940    process_cxy = GET_CXY( process_xp );
1941    process_ptr = GET_PTR( process_xp );
[564]1942    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1943
[564]1944// check process descriptor in owner cluster
1945assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
1946"process descriptor not in owner cluster" );
[436]1947
1948    // release TXT ownership (does nothing if not TXT owner)
1949    process_txt_transfer_ownership( process_xp );
[428]1950
[436]1951    // get extended pointer on process stdin file
[564]1952    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]1953
1954    // get pointers on TXT_RX chdev
1955    chdev_xp  = chdev_from_file( file_xp );
[428]1956    chdev_cxy = GET_CXY( chdev_xp );
1957    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1958
[436]1959    // get extended pointer on lock protecting attached process list
[428]1960    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1961
[564]1962    // get lock protecting list of processes attached to TXT
1963    remote_busylock_acquire( lock_xp );
1964
[428]1965    // unlink process from attached process list
[436]1966    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
1967
[564]1968    // release lock protecting list of processes attached to TXT
1969    remote_busylock_release( lock_xp );
1970
[446]1971#if DEBUG_PROCESS_TXT
[610]1972thread_t * this = CURRENT_THREAD;
[457]1973uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1974uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]1975if( DEBUG_PROCESS_TXT < cycle )
[610]1976printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
1977__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]1978#endif
[428]1979
1980} // end process_txt_detach()
1981
1982///////////////////////////////////////////////////
1983void process_txt_set_ownership( xptr_t process_xp )
1984{
1985    process_t * process_ptr;
1986    cxy_t       process_cxy;
[436]1987    pid_t       process_pid;
[428]1988    xptr_t      file_xp;
1989    xptr_t      txt_xp;     
1990    chdev_t   * txt_ptr;
1991    cxy_t       txt_cxy;
1992
[436]1993    // get pointers on process in owner cluster
[428]1994    process_cxy = GET_CXY( process_xp );
[435]1995    process_ptr = GET_PTR( process_xp );
[564]1996    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1997
1998    // check owner cluster
[492]1999    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2000    "process descriptor not in owner cluster\n" );
2001
[428]2002    // get extended pointer on stdin pseudo file
[564]2003    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2004
2005    // get pointers on TXT chdev
2006    txt_xp  = chdev_from_file( file_xp );
2007    txt_cxy = GET_CXY( txt_xp );
[435]2008    txt_ptr = GET_PTR( txt_xp );
[428]2009
2010    // set owner field in TXT chdev
[564]2011    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2012
[446]2013#if DEBUG_PROCESS_TXT
[610]2014thread_t * this = CURRENT_THREAD;
[457]2015uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2016uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2017if( DEBUG_PROCESS_TXT < cycle )
[610]2018printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
2019__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2020#endif
2021
[428]2022}  // end process_txt_set ownership()
2023
[436]2024////////////////////////////////////////////////////////
2025void process_txt_transfer_ownership( xptr_t process_xp )
[428]2026{
[436]2027    process_t * process_ptr;     // local pointer on process releasing ownership
2028    cxy_t       process_cxy;     // process cluster
2029    pid_t       process_pid;     // process identifier
[428]2030    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2031    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2032    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2033    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2034    uint32_t    txt_id;          // TXT_RX channel
[428]2035    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2036    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2037    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2038    xptr_t      iter_xp;         // iterator for xlist
2039    xptr_t      current_xp;      // extended pointer on current process
[433]2040    process_t * current_ptr;     // local pointer on current process
2041    cxy_t       current_cxy;     // cluster for current process
[428]2042
[457]2043#if DEBUG_PROCESS_TXT
[610]2044thread_t * this  = CURRENT_THREAD;
2045uint32_t   cycle;
[457]2046#endif
2047
[436]2048    // get pointers on process in owner cluster
[428]2049    process_cxy = GET_CXY( process_xp );
[435]2050    process_ptr = GET_PTR( process_xp );
[564]2051    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2052
2053    // check owner cluster
[492]2054    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2055    "process descriptor not in owner cluster\n" );
2056
[428]2057    // get extended pointer on stdin pseudo file
[564]2058    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2059
2060    // get pointers on TXT chdev
2061    txt_xp  = chdev_from_file( file_xp );
2062    txt_cxy = GET_CXY( txt_xp );
[433]2063    txt_ptr = GET_PTR( txt_xp );
[428]2064
[433]2065    // get extended pointer on TXT_RX owner and TXT channel
[564]2066    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2067    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2068
[436]2069    // transfer ownership only if process is the TXT owner
2070    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2071    {
[436]2072        // get extended pointers on root and lock of attached processes list
2073        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2074        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2075
[436]2076        // get lock
[564]2077        remote_busylock_acquire( lock_xp );
[436]2078
2079        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]2080        {
[436]2081            // scan attached process list to find KSH process
2082            XLIST_FOREACH( root_xp , iter_xp )
2083            {
2084                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2085                current_cxy = GET_CXY( current_xp );
2086                current_ptr = GET_PTR( current_xp );
[435]2087
[436]2088                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2089                {
2090                    // release lock
[564]2091                    remote_busylock_release( lock_xp );
[436]2092
2093                    // set owner field in TXT chdev
[564]2094                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2095
[446]2096#if DEBUG_PROCESS_TXT
[610]2097cycle = (uint32_t)hal_get_cycles();
[564]2098uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2099if( DEBUG_PROCESS_TXT < cycle )
[610]2100printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
2101__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
[457]2102process_txt_display( txt_id );
[436]2103#endif
2104                     return;
2105                }
2106            }
2107 
2108            // release lock
[564]2109            remote_busylock_release( lock_xp );
[436]2110
2111            // PANIC if KSH not found
[492]2112            assert( false , "KSH process not found for TXT %d" , txt_id );
[436]2113
2114            return;
2115        }
2116        else                                               // process is KSH
2117        {
2118            // scan attached process list to find another process
2119            XLIST_FOREACH( root_xp , iter_xp )
[428]2120            {
[436]2121                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2122                current_cxy = GET_CXY( current_xp );
2123                current_ptr = GET_PTR( current_xp );
2124
2125                if( current_xp != process_xp )            // current is not KSH
2126                {
2127                    // release lock
[564]2128                    remote_busylock_release( lock_xp );
[436]2129
2130                    // set owner field in TXT chdev
[564]2131                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2132
[446]2133#if DEBUG_PROCESS_TXT
[610]2134cycle  = (uint32_t)hal_get_cycles();
[564]2135uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2136if( DEBUG_PROCESS_TXT < cycle )
[610]2137printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
2138__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[457]2139process_txt_display( txt_id );
[436]2140#endif
2141                     return;
2142                }
[428]2143            }
[436]2144
2145            // release lock
[564]2146            remote_busylock_release( lock_xp );
[436]2147
2148            // no more owner for TXT if no other process found
[564]2149            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2150
[446]2151#if DEBUG_PROCESS_TXT
[436]2152cycle = (uint32_t)hal_get_cycles();
[446]2153if( DEBUG_PROCESS_TXT < cycle )
[610]2154printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
2155__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[457]2156process_txt_display( txt_id );
[436]2157#endif
2158            return;
[428]2159        }
[436]2160    }
2161    else
2162    {
[433]2163
[446]2164#if DEBUG_PROCESS_TXT
[436]2165cycle = (uint32_t)hal_get_cycles();
[446]2166if( DEBUG_PROCESS_TXT < cycle )
[593]2167printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n",
[610]2168__FUNCTION__, this->trdid, process_pid, cycle );
[457]2169process_txt_display( txt_id );
[436]2170#endif
2171
[428]2172    }
[436]2173}  // end process_txt_transfer_ownership()
[428]2174
2175
[564]2176////////////////////////////////////////////////
2177bool_t process_txt_is_owner( xptr_t process_xp )
[457]2178{
2179    // get local pointer and cluster of process in owner cluster
2180    cxy_t       process_cxy = GET_CXY( process_xp );
2181    process_t * process_ptr = GET_PTR( process_xp );
2182
[564]2183// check target process descriptor is in owner cluster
2184pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2185assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2186"process descriptor not in owner cluster\n" );
[457]2187
2188    // get extended pointer on stdin pseudo file
[564]2189    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2190
2191    // get pointers on TXT chdev
2192    xptr_t    txt_xp  = chdev_from_file( file_xp );
2193    cxy_t     txt_cxy = GET_CXY( txt_xp );
2194    chdev_t * txt_ptr = GET_PTR( txt_xp );
2195
2196    // get extended pointer on TXT_RX owner process
[564]2197    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2198
2199    return (process_xp == owner_xp);
2200
2201}   // end process_txt_is_owner()
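
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): process_txt_is_owner() must receive an extended
// pointer on the process descriptor in its owner cluster, as enforced by the assert
// above. The typical pattern, also used by process_txt_detach(), is to test ownership
// and transfer it before the process disappears. The wrapper below is hypothetical.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_release_terminal( process_t * process )
{
    // build extended pointer on the process descriptor in the owner cluster
    xptr_t process_xp = XPTR( local_cxy , process );

    // give the terminal to another attached process if this one owns it
    if( process_txt_is_owner( process_xp ) )
    {
        process_txt_transfer_ownership( process_xp );
    }
}
#endif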
2202
[436]2203////////////////////////////////////////////////     
2204xptr_t process_txt_get_owner( uint32_t channel )
[435]2205{
2206    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2207    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2208    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2209
[564]2210    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2211
[457]2212}  // end process_txt_get_owner()
2213
[435]2214///////////////////////////////////////////
2215void process_txt_display( uint32_t txt_id )
2216{
2217    xptr_t      chdev_xp;
2218    cxy_t       chdev_cxy;
2219    chdev_t   * chdev_ptr;
2220    xptr_t      root_xp;
2221    xptr_t      lock_xp;
2222    xptr_t      current_xp;
2223    xptr_t      iter_xp;
[443]2224    cxy_t       txt0_cxy;
2225    chdev_t   * txt0_ptr;
2226    xptr_t      txt0_xp;
2227    xptr_t      txt0_lock_xp;
2228   
[435]2229    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2230    "illegal TXT terminal index" );
[435]2231
[443]2232    // get pointers on TXT0 chdev
2233    txt0_xp  = chdev_dir.txt_tx[0];
2234    txt0_cxy = GET_CXY( txt0_xp );
2235    txt0_ptr = GET_PTR( txt0_xp );
2236
2237    // get extended pointer on TXT0 lock
2238    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2239
[435]2240    // get pointers on TXT_RX[txt_id] chdev
2241    chdev_xp  = chdev_dir.txt_rx[txt_id];
2242    chdev_cxy = GET_CXY( chdev_xp );
2243    chdev_ptr = GET_PTR( chdev_xp );
2244
2245    // get extended pointer on root & lock of attached process list
2246    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2247    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2248
[443]2249    // get lock on attached process list
[564]2250    remote_busylock_acquire( lock_xp );
[443]2251
2252    // get TXT0 lock in busy waiting mode
[564]2253    remote_busylock_acquire( txt0_lock_xp );
[443]2254
[435]2255    // display header
[443]2256    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2257    txt_id , (uint32_t)hal_get_cycles() );
[435]2258
[436]2259    // scan attached process list
[435]2260    XLIST_FOREACH( root_xp , iter_xp )
2261    {
2262        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2263        process_display( current_xp );
2264    }
2265
[443]2266    // release TXT0 lock in busy waiting mode
[564]2267    remote_busylock_release( txt0_lock_xp );
[443]2268
2269    // release lock on attached process list
[564]2270    remote_busylock_release( lock_xp );
[435]2271
2272}  // end process_txt_display
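
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not compiled): process_txt_display() handles a single terminal;
// displaying all user terminals simply iterates over the TXT channels registered in
// the local cluster descriptor, skipping TXT0 which is reserved for the kernel.
// The helper function is hypothetical.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void example_display_all_txt( void )
{
    uint32_t txt_id;

    // TXT0 is reserved for kernel messages : start at channel 1
    for( txt_id = 1 ; txt_id < LOCAL_CLUSTER->nb_txt_channels ; txt_id++ )
    {
        process_txt_display( txt_id );
    }
}
#endif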