source: trunk/kernel/kern/process.c @ 619

Last change on this file since 619 was 619, checked in by alain, 5 years ago

1) Fix a bug in KSH : after the "load" command,

the [ksh] prompt is now printed after completion
of the loaded application.

2) Fix a bug in vmm_handle_cow() : the copy-on-write

now uses a hal_remote_memcpy() to replicate the page content.


File size: 78.5 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
[428]41#include <chdev.h>
[1]42#include <list.h>
[407]43#include <string.h>
[1]44#include <scheduler.h>
[564]45#include <busylock.h>
46#include <queuelock.h>
47#include <remote_queuelock.h>
48#include <rwlock.h>
49#include <remote_rwlock.h>
[1]50#include <dqdt.h>
51#include <cluster.h>
52#include <ppm.h>
53#include <boot_info.h>
54#include <process.h>
55#include <elf.h>
[23]56#include <syscalls.h>
[435]57#include <shared_syscalls.h>
[1]58
59//////////////////////////////////////////////////////////////////////////////////////////
60// Extern global variables
61//////////////////////////////////////////////////////////////////////////////////////////
62
[428]63extern process_t           process_zero;     // allocated in kernel_init.c
64extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]65
66//////////////////////////////////////////////////////////////////////////////////////////
67// Process initialisation related functions
68//////////////////////////////////////////////////////////////////////////////////////////
69
[583]70/////////////////////////////////
[503]71process_t * process_alloc( void )
[1]72{
73        kmem_req_t   req;
74
75    req.type  = KMEM_PROCESS;
76        req.size  = sizeof(process_t);
77        req.flags = AF_KERNEL;
78
79    return (process_t *)kmem_alloc( &req );
80}
81
82////////////////////////////////////////
83void process_free( process_t * process )
84{
85    kmem_req_t  req;
86
87        req.type = KMEM_PROCESS;
88        req.ptr  = process;
89        kmem_free( &req );
90}
91
/////////////////////////////////////////////////////////////////////////////////////////
// This function initializes, in the local cluster, the reference process descriptor
// pointed by <process>, identified by <pid>, whose parent is <parent_xp>.
// - The pid / ref_xp / owner_xp / parent_xp / term_state fields are initialized,
//   and the VFS root and CWD inodes are inherited from the parent process.
// - The VMM and the fd_array are initialized as empty.
// - For the INIT process (pid == 1) and for a direct child of INIT (KSH), a TXT
//   terminal is attached and the stdin/stdout/stderr pseudo-files are created.
//   For any other user process, the parent TXT terminal and all open files in the
//   parent fd_array are inherited.
// - The new descriptor is finally registered in the local cluster manager pref_tbl[],
//   local_list, and copies_list, and the th_tbl[] array is initialized as empty.
/////////////////////////////////////////////////////////////////////////////////////////
// @ process   : local pointer on the process descriptor to initialize.
// @ pid       : process identifier.
// @ parent_xp : extended pointer on the parent process descriptor.
/////////////////////////////////////////////////////////////////////////////////////////
void process_reference_init( process_t * process,
                             pid_t       pid,
                             xptr_t      parent_xp )
{
    xptr_t      process_xp;    // extended pointer on this reference process
    cxy_t       parent_cxy;    // parent process cluster
    process_t * parent_ptr;    // parent process local pointer
    xptr_t      stdin_xp;      // extended pointer on stdin pseudo file
    xptr_t      stdout_xp;     // extended pointer on stdout pseudo file
    xptr_t      stderr_xp;     // extended pointer on stderr pseudo file
    uint32_t    stdin_id;      // expected index 0 in fd_array
    uint32_t    stdout_id;     // expected index 1 in fd_array
    uint32_t    stderr_id;     // expected index 2 in fd_array
    error_t     error;
    uint32_t    txt_id;        // TXT terminal channel index
    char        rx_path[40];   // pathname of TXT_RX chdev pseudo file
    char        tx_path[40];   // pathname of TXT_TX chdev pseudo file
    xptr_t      file_xp;       // extended pointer on parent stdin file
    xptr_t      chdev_xp;      // extended pointer on parent TXT chdev
    chdev_t *   chdev_ptr;
    cxy_t       chdev_cxy;
    pid_t       parent_pid;

    // build extended pointer on this reference process
    process_xp = XPTR( local_cxy , process );

    // get parent process cluster and local pointer
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );

    // get parent_pid (remote read, parent may be in another cluster)
    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

#if DEBUG_PROCESS_REFERENCE_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter to initalialize process %x / cycle %d\n",
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
        process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->owner_xp   = XPTR( local_cxy , process );
    process->parent_xp  = parent_xp;
    process->term_state = 0;

    // initialize VFS root inode and CWD inode (inherited from parent)
    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );

    // initialize vmm as empty
    error = vmm_init( process );

assert( (error == 0) , "cannot initialize VMM\n" );
 
#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    // initialize fd_array as empty
    process_fd_init( process );

    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
    {
        // select a TXT channel
        if( pid == 1 )  txt_id = 0;                     // INIT
        else            txt_id = process_txt_alloc();   // KSH

        // attach process to TXT
        process_txt_attach( process , txt_id ); 

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
#endif
        // build path to TXT_RX[i] and TXT_TX[i] chdevs
        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );

        // create stdin pseudo file (read-only on TXT_RX)
        error = vfs_open(  process->vfs_root_xp,
                           rx_path,
                           process_xp,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

assert( (error == 0) , "cannot open stdin pseudo file" );
assert( (stdin_id == 0) , "stdin index must be 0" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

        // create stdout pseudo file (write-only on TXT_TX)
        error = vfs_open(  process->vfs_root_xp,
                           tx_path,
                           process_xp,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        assert( (error == 0) , "cannot open stdout pseudo file" );
        assert( (stdout_id == 1) , "stdout index must be 1" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

        // create stderr pseudo file (write-only on the same TXT_TX)
        error = vfs_open(  process->vfs_root_xp,
                           tx_path,
                           process_xp,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );

        assert( (error == 0) , "cannot open stderr pseudo file" );
        assert( (stderr_id == 2) , "stderr index must be 2" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    }
    else                                            // normal user process
    {
        // get extended pointer on stdin pseudo file in parent process
        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );

        // get extended pointer on parent process TXT chdev
        chdev_xp = chdev_from_file( file_xp );
 
        // get cluster and local pointer on chdev
        chdev_cxy = GET_CXY( chdev_xp );
        chdev_ptr = GET_PTR( chdev_xp );
 
        // get parent process TXT terminal index
        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );

        // attach child process to parent process TXT terminal
        process_txt_attach( process , txt_id ); 

        // copy all open files from parent process fd_array to this process
        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
    }

    // initialize lock protecting CWD changes
    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid , cycle );
#endif

    // reset children list root
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr     = 0;
    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );

    // reset semaphore / mutex / barrier / condvar list roots and lock
    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );

    // reset open directories root and lock
    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );

    // register new process in the local cluster manager pref_tbl[]
    lpid_t lpid = LPID_FROM_PID( pid );
    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( process );

    // register new process descriptor in local cluster manager copies_list
    cluster_process_copies_link( process );

    // initialize th_tbl[] array and associated threads
    uint32_t i;

    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
        {
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );

    // make all initializations visible to other cores before use
        hal_fence();

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

}  // process_reference_init()
[204]317
/////////////////////////////////////////////////////////////////////////////////////////
// This function initializes, in the local cluster, a copy of the reference process
// descriptor identified by <reference_process_xp>.
// - The pid / parent_xp / vfs_root_xp / vfs_bin_xp fields are copied (remote reads)
//   from the reference descriptor; ref_xp and owner_xp point on the reference.
// - The VMM and the fd_array are reset as empty, and cwd_xp is reset to XPTR_NULL.
// - The children and user-synchronization list roots are reset (not used in a copy).
// - The copy is registered in the local cluster manager local_list and in the owner
//   cluster manager copies_list.
// It returns 0 on success.
/////////////////////////////////////////////////////////////////////////////////////////
// @ local_process         : local pointer on the process descriptor copy.
// @ reference_process_xp  : extended pointer on the reference process descriptor.
/////////////////////////////////////////////////////////////////////////////////////////
error_t process_copy_init( process_t * local_process,
                           xptr_t      reference_process_xp )
{
    error_t error;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    process_t * ref_ptr = GET_PTR( reference_process_xp );

    // initialize PID, REF_XP, PARENT_XP, and STATE
    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    local_process->ref_xp     = reference_process_xp;
    local_process->owner_xp   = reference_process_xp;
    local_process->term_state = 0;

#if DEBUG_PROCESS_COPY_INIT
thread_t * this = CURRENT_THREAD; 
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
#endif

// check user process
assert( (local_process->pid != 0), "PID cannot be 0" );

    // reset local process vmm
    error = vmm_init( local_process );
    assert( (error == 0) , "cannot initialize VMM\n");

    // reset process file descriptors array
        process_fd_init( local_process );

    // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    local_process->cwd_xp      = XPTR_NULL;

    // reset children list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    local_process->children_nr   = 0;
    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
                           LOCK_PROCESS_CHILDREN );

    // reset children_list (not used in a process descriptor copy)
    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );

    // reset semaphores list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );

    // initialize th_tbl[] array and associated fields
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
        {
        local_process->th_tbl[i] = NULL;
    }
    local_process->th_nr  = 0;
    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( local_process );

    // register new process descriptor in owner cluster manager copies_list
    cluster_process_copies_link( local_process );

    // make all initializations visible to other cores before use
        hal_fence();

#if DEBUG_PROCESS_COPY_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
#endif

    return 0;

} // end process_copy_init()
401
/////////////////////////////////////////////////////////////////////////////////////////
// This function releases all resources attached to the process descriptor <process>
// in the local cluster. The process must not contain any thread anymore (checked
// by the assert on th_nr).
// - The VMM is destroyed.
// - The descriptor is unlinked from the cluster manager local_list and copies_list.
// - When the local cluster is the owner cluster, the process is also removed from
//   its parent children list, and the PID is released to the cluster manager.
// - The process descriptor memory is finally released.
/////////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;         // extended pointer on parent process
    process_t * parent_ptr;        // parent process local pointer
    cxy_t       parent_cxy;        // parent process cluster
    xptr_t      children_lock_xp;  // extended pointer on parent children lock
    xptr_t      children_nr_xp;    // extended pointer on parent children counter

    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from children_list
    // and release PID if owner cluster
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG]
    // FIXME close all open files [AG]
    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
503
[583]504///////////////////////////////////////////////////////////////////
[527]505const char * process_action_str( process_sigactions_t action_type )
[409]506{
[583]507    switch ( action_type )
508    {
509        case BLOCK_ALL_THREADS:   return "BLOCK";
510        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
511        case DELETE_ALL_THREADS:  return "DELETE";
512        default:                  return "undefined";
513    }
[409]514}
515
/////////////////////////////////////////////////////////////////////////////////////////
// This function applies a given action (BLOCK / UNBLOCK / DELETE all threads) to all
// copies of the process identified by <pid>.
// The calling (client) thread sends one parallel, non-blocking RPC_PROCESS_SIGACTION
// to each remote cluster containing a process copy, using a single RPC descriptor
// allocated in its own stack (it can be shared because there are no output
// arguments), together with a shared responses counter.
// The client thread blocks itself before sending the RPCs: when remote copies exist
// it deschedules and is unblocked by the last RPC server thread; otherwise it
// unblocks itself. The local process copy, if any, is then handled directly by
// calling process_block_threads() / process_unblock_threads() /
// process_delete_threads().
/////////////////////////////////////////////////////////////////////////////////////////
// @ pid  : target process identifier.
// @ type : action type (BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS).
/////////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself (must be done before sending any RPC,
    // so a fast server response cannot be lost)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
669
[433]670/////////////////////////////////////////////////
[583]671void process_block_threads( process_t * process )
[1]672{
[409]673    thread_t          * target;         // pointer on target thread
[433]674    thread_t          * this;           // pointer on calling thread
[564]675    uint32_t            ltid;           // index in process th_tbl[]
[436]676    cxy_t               owner_cxy;      // target process owner cluster
[409]677    uint32_t            count;          // requests counter
[593]678    volatile uint32_t   ack_count;      // acknowledges counter
[1]679
[416]680    // get calling thread pointer
[433]681    this = CURRENT_THREAD;
[407]682
[438]683#if DEBUG_PROCESS_SIGACTION
[564]684pid_t pid = process->pid;
[433]685uint32_t cycle = (uint32_t)hal_get_cycles();
[438]686if( DEBUG_PROCESS_SIGACTION < cycle )
[593]687printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]688__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]689#endif
[409]690
[564]691// check target process is an user process
[619]692assert( (LPID_FROM_PID( process->pid ) != 0 ),
693"process %x is not an user process\n", process->pid );
[564]694
[610]695    // get target process owner cluster
[564]696    owner_cxy = CXY_FROM_PID( process->pid );
697
[409]698    // get lock protecting process th_tbl[]
[564]699    rwlock_rd_acquire( &process->th_lock );
[1]700
[440]701    // loop on target process local threads
[409]702    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[593]703    // - if the calling thread and the target thread are not running on the same
704    //   core, we ask the target scheduler to acknowlege the blocking
705    //   to be sure that the target thread is not running.
706    // - if the calling thread and the target thread are running on the same core,
707    //   we don't need confirmation from scheduler.
708           
[436]709    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]710    {
[409]711        target = process->th_tbl[ltid];
[1]712
[436]713        if( target != NULL )                                 // thread exist
[1]714        {
715            count++;
[409]716
[583]717            // set the global blocked bit in target thread descriptor.
718            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]719 
[583]720            if( this->core->lid != target->core->lid )
721            {
722                // increment responses counter
723                hal_atomic_add( (void*)&ack_count , 1 );
[409]724
[583]725                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
726                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]727
[583]728                // force scheduling on target thread
729                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]730            }
[1]731        }
[172]732    }
733
[428]734    // release lock protecting process th_tbl[]
[564]735    rwlock_rd_release( &process->th_lock );
[416]736
[593]737    // wait other threads acknowledges  TODO this could be improved...
[409]738    while( 1 )
739    {
[610]740        // exit when all scheduler acknowledges received
[436]741        if ( ack_count == 0 ) break;
[409]742   
743        // wait 1000 cycles before retry
744        hal_fixed_delay( 1000 );
745    }
[1]746
[438]747#if DEBUG_PROCESS_SIGACTION
[433]748cycle = (uint32_t)hal_get_cycles();
[438]749if( DEBUG_PROCESS_SIGACTION < cycle )
[593]750printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
751__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]752#endif
[409]753
[428]754}  // end process_block_threads()
[409]755
/////////////////////////////////////////////////
// Mark for deletion all threads of a process that are local to this cluster,
// except the process main thread (when the owner cluster is local) and the
// client thread identified by <client_xp>, which must survive to complete
// the on-going sigaction. Threads are not destroyed here: thread_delete()
// only marks and blocks them; actual destruction is done by the schedulers.
/////////////////////////////////////////////////
void process_delete_threads( process_t * process,
                             xptr_t      client_xp )
{
    thread_t          * this;          // pointer on calling thread
    thread_t          * target;        // local pointer on target thread
    xptr_t              target_xp;     // extended pointer on target thread
    cxy_t               owner_cxy;     // owner process cluster
    uint32_t            ltid;          // index in process th_tbl
    uint32_t            count;         // threads counter

    // get calling thread pointer
    this = CURRENT_THREAD;

    // get target process owner cluster
    // (the main thread lives in the owner cluster, in slot 0)
    owner_cxy = CXY_FROM_PID( process->pid );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
#endif

// check target process is an user process
// (LPID 0 is reserved for the kernel process_zero, which cannot be killed)
assert( (LPID_FROM_PID( process->pid ) != 0),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    // (writer lock: thread_delete() updates thread state)
    rwlock_wr_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )    // valid thread
        {
            count++;
            target_xp = XPTR( local_cxy , target );

            // main thread and client thread should not be deleted
            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
                (client_xp) != target_xp )                           // not client thread
            {
                // mark target thread for delete and block it
                thread_delete( target_xp , process->pid , false );   // not forced
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_wr_release( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end process_delete_threads()
[409]819
///////////////////////////////////////////////////
// Unblock all threads of a process that are local to this cluster,
// by resetting the THREAD_BLOCKED_GLOBAL bit in each thread descriptor.
// This is the reverse operation of process_block_threads().
///////////////////////////////////////////////////
void process_unblock_threads( process_t * process )
{
    thread_t          * target;        // pointer on target thread
    thread_t          * this;          // pointer on calling thread
    uint32_t            ltid;          // index in process th_tbl
    uint32_t            count;         // requests counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
// (LPID 0 is reserved for the kernel process_zero)
assert( ( LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    // (reader lock is enough: th_tbl[] itself is not modified here)
    rwlock_rd_acquire( &process->th_lock );

    // loop on process threads to unblock all threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread found
        {
            count++;

            // reset the global blocked bit in target thread descriptor.
            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_unblock_threads()
[407]872
[1]873///////////////////////////////////////////////
874process_t * process_get_local_copy( pid_t pid )
875{
876    error_t        error;
[172]877    process_t    * process_ptr;   // local pointer on process
[23]878    xptr_t         process_xp;    // extended pointer on process
[1]879
880    cluster_t * cluster = LOCAL_CLUSTER;
881
[564]882#if DEBUG_PROCESS_GET_LOCAL_COPY
883thread_t * this = CURRENT_THREAD;
884uint32_t cycle = (uint32_t)hal_get_cycles();
885if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]886printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]887__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]888#endif
889
[1]890    // get lock protecting local list of processes
[564]891    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]892
893    // scan the local list of process descriptors to find the process
[23]894    xptr_t  iter;
895    bool_t  found = false;
896    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]897    {
[23]898        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]899        process_ptr = GET_PTR( process_xp );
[23]900        if( process_ptr->pid == pid )
[1]901        {
902            found = true;
903            break;
904        }
905    }
906
907    // release lock protecting local list of processes
[564]908    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]909
[172]910    // allocate memory for a new local process descriptor
[440]911    // and initialise it from reference cluster if not found
[1]912    if( !found )
913    {
914        // get extended pointer on reference process descriptor
[23]915        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]916
[492]917        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]918
[1]919        // allocate memory for local process descriptor
[23]920        process_ptr = process_alloc();
[443]921
[23]922        if( process_ptr == NULL )  return NULL;
[1]923
924        // initialize local process descriptor copy
[23]925        error = process_copy_init( process_ptr , ref_xp );
[443]926
[1]927        if( error ) return NULL;
928    }
929
[440]930#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]931cycle = (uint32_t)hal_get_cycles();
[440]932if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]933printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]934__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]935#endif
936
[23]937    return process_ptr;
[1]938
[409]939}  // end process_get_local_copy()
940
[436]941////////////////////////////////////////////
942pid_t process_get_ppid( xptr_t  process_xp )
943{
944    cxy_t       process_cxy;
945    process_t * process_ptr;
946    xptr_t      parent_xp;
947    cxy_t       parent_cxy;
948    process_t * parent_ptr;
949
950    // get process cluster and local pointer
951    process_cxy = GET_CXY( process_xp );
952    process_ptr = GET_PTR( process_xp );
953
954    // get pointers on parent process
[564]955    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]956    parent_cxy = GET_CXY( parent_xp );
957    parent_ptr = GET_PTR( parent_xp );
958
[564]959    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]960}
961
[1]962//////////////////////////////////////////////////////////////////////////////////////////
963// File descriptor array related functions
964//////////////////////////////////////////////////////////////////////////////////////////
965
966///////////////////////////////////////////
967void process_fd_init( process_t * process )
968{
969    uint32_t fd;
970
[610]971    // initialize lock
[564]972    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]973
[610]974    // initialize number of open files
[23]975    process->fd_array.current = 0;
976
[1]977    // initialize array
[23]978    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]979    {
980        process->fd_array.array[fd] = XPTR_NULL;
981    }
982}
[610]983////////////////////////////////////////////////////
984error_t process_fd_register( xptr_t      process_xp,
[407]985                             xptr_t      file_xp,
986                             uint32_t  * fdid )
[1]987{
988    bool_t    found;
[23]989    uint32_t  id;
990    xptr_t    xp;
[1]991
[23]992    // get reference process cluster and local pointer
[610]993    process_t * process_ptr = GET_PTR( process_xp );
994    cxy_t       process_cxy = GET_CXY( process_xp );
[23]995
[610]996// check client process is reference process
997assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
998"client process must be reference process\n" );
999
1000#if DEBUG_PROCESS_FD_REGISTER
1001thread_t * this  = CURRENT_THREAD;
1002uint32_t   cycle = (uint32_t)hal_get_cycles();
1003pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1004if( DEBUG_PROCESS_FD_REGISTER < cycle )
1005printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1006__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1007#endif
1008
1009    // build extended pointer on lock protecting reference fd_array
1010    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1011
[23]1012    // take lock protecting reference fd_array
[610]1013        remote_queuelock_acquire( lock_xp );
[23]1014
[1]1015    found   = false;
1016
[23]1017    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1018    {
[610]1019        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]1020        if ( xp == XPTR_NULL )
[1]1021        {
[564]1022            // update reference fd_array
[610]1023            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1024                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]1025
1026            // exit
1027                        *fdid = id;
[1]1028            found = true;
1029            break;
1030        }
1031    }
1032
[610]1033    // release lock protecting fd_array
1034        remote_queuelock_release( lock_xp );
[1]1035
[610]1036#if DEBUG_PROCESS_FD_REGISTER
1037cycle = (uint32_t)hal_get_cycles();
1038if( DEBUG_PROCESS_FD_REGISTER < cycle )
1039printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1040__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1041#endif
1042
[428]1043    if ( !found ) return -1;
[1]1044    else          return 0;
1045
[610]1046}  // end process_fd_register()
1047
////////////////////////////////////////////////
// Return the extended pointer on the file descriptor registered with
// index <fdid>, using the local fd_array copy as a cache: when the local
// entry is empty, the reference process fd_array is consulted (under its
// remote lock) and the local entry is updated on hit.
// Returns XPTR_NULL when no file is registered for <fdid>.
////////////////////////////////////////////////
xptr_t process_fd_get_xptr( process_t * process,
                            uint32_t    fdid )
{
    xptr_t  file_xp;
    xptr_t  lock_xp;

    // access local copy of process descriptor
    // (lock-free fast path: a non-null cached entry is never invalidated here)
    file_xp = process->fd_array.array[fdid];

    if( file_xp == XPTR_NULL )
    {
        // get reference process cluster and local pointer
        xptr_t      ref_xp  = process->ref_xp;
        cxy_t       ref_cxy = GET_CXY( ref_xp );
        process_t * ref_ptr = GET_PTR( ref_xp );

        // build extended pointer on lock protecting reference fd_array
        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );

        // take lock protecting reference fd_array
	    remote_queuelock_acquire( lock_xp );

        // access reference process descriptor
        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );

        // update local fd_array if found
        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
       
        // release lock protecting reference fd_array
	    remote_queuelock_release( lock_xp );
    }

    return file_xp;

}  // end process_fd_get_xptr()
1084
[1]1085///////////////////////////////////////////
1086void process_fd_remote_copy( xptr_t dst_xp,
1087                             xptr_t src_xp )
1088{
1089    uint32_t fd;
1090    xptr_t   entry;
1091
1092    // get cluster and local pointer for src fd_array
1093    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1094    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1095
1096    // get cluster and local pointer for dst fd_array
1097    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1098    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1099
1100    // get the remote lock protecting the src fd_array
[564]1101        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1102
[428]1103    // loop on all fd_array entries
1104    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1105        {
[564]1106                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1107
1108                if( entry != XPTR_NULL )
1109                {
[459]1110            // increment file descriptor refcount
[1]1111            vfs_file_count_up( entry );
1112
1113                        // copy entry in destination process fd_array
[564]1114                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1115                }
1116        }
1117
1118    // release lock on source process fd_array
[564]1119        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1120
[407]1121}  // end process_fd_remote_copy()
1122
[564]1123
1124////////////////////////////////////
1125bool_t process_fd_array_full( void )
1126{
1127    // get extended pointer on reference process
1128    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1129
1130    // get reference process cluster and local pointer
1131    process_t * ref_ptr = GET_PTR( ref_xp );
1132    cxy_t       ref_cxy = GET_CXY( ref_xp );
1133
1134    // get number of open file descriptors from reference fd_array
1135    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1136
1137        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1138}
1139
1140
[1]1141////////////////////////////////////////////////////////////////////////////////////
1142//  Thread related functions
1143////////////////////////////////////////////////////////////////////////////////////
1144
1145/////////////////////////////////////////////////////
1146error_t process_register_thread( process_t * process,
1147                                 thread_t  * thread,
1148                                 trdid_t   * trdid )
1149{
[472]1150    ltid_t         ltid;
1151    bool_t         found = false;
1152 
[564]1153// check arguments
1154assert( (process != NULL) , "process argument is NULL" );
1155assert( (thread != NULL) , "thread argument is NULL" );
[1]1156
[564]1157    // get the lock protecting th_tbl for all threads
1158    // but the idle thread executing kernel_init (cannot yield)
1159    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1160
[583]1161    // scan th_tbl
[564]1162    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1163    {
1164        if( process->th_tbl[ltid] == NULL )
1165        {
1166            found = true;
1167            break;
1168        }
1169    }
1170
1171    if( found )
1172    {
1173        // register thread in th_tbl[]
1174        process->th_tbl[ltid] = thread;
1175        process->th_nr++;
1176
1177        // returns trdid
1178        *trdid = TRDID( local_cxy , ltid );
1179    }
1180
[583]1181    // release the lock protecting th_tbl
[564]1182    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1183
[564]1184    return (found) ? 0 : 0xFFFFFFFF;
[204]1185
1186}  // end process_register_thread()
1187
[443]1188/////////////////////////////////////////////////
1189bool_t process_remove_thread( thread_t * thread )
[1]1190{
[443]1191    uint32_t count;  // number of threads in local process descriptor
1192
[1]1193    process_t * process = thread->process;
1194
1195    // get thread local index
1196    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1197   
1198    // get the lock protecting th_tbl[]
1199    rwlock_wr_acquire( &process->th_lock );
[428]1200
[583]1201    // get number of threads
[443]1202    count = process->th_nr;
[428]1203
[583]1204// check thread
1205assert( (thread != NULL) , "thread argument is NULL" );
1206
[564]1207// check th_nr value
[583]1208assert( (count > 0) , "process th_nr cannot be 0\n" );
[443]1209
[1]1210    // remove thread from th_tbl[]
1211    process->th_tbl[ltid] = NULL;
[450]1212    process->th_nr = count-1;
[1]1213
[583]1214    // release lock protecting th_tbl
[564]1215    rwlock_wr_release( &process->th_lock );
[428]1216
[443]1217    return (count == 1);
1218
[450]1219}  // end process_remove_thread()
[204]1220
[408]1221/////////////////////////////////////////////////////////
1222error_t process_make_fork( xptr_t      parent_process_xp,
1223                           xptr_t      parent_thread_xp,
1224                           pid_t     * child_pid,
1225                           thread_t ** child_thread )
[1]1226{
[408]1227    process_t * process;         // local pointer on child process descriptor
1228    thread_t  * thread;          // local pointer on child thread descriptor
1229    pid_t       new_pid;         // process identifier for child process
1230    pid_t       parent_pid;      // process identifier for parent process
1231    xptr_t      ref_xp;          // extended pointer on reference process
[428]1232    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1233    error_t     error;
[1]1234
[408]1235    // get cluster and local pointer for parent process
1236    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1237    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1238
[428]1239    // get parent process PID and extended pointer on .elf file
[564]1240    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1241    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1242
[564]1243    // get extended pointer on reference process
1244    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1245
[564]1246// check parent process is the reference process
1247assert( (parent_process_xp == ref_xp ) ,
1248"parent process must be the reference process\n" );
[407]1249
[438]1250#if DEBUG_PROCESS_MAKE_FORK
[583]1251uint32_t cycle   = (uint32_t)hal_get_cycles();
1252thread_t * this  = CURRENT_THREAD;
1253trdid_t    trdid = this->trdid;
1254pid_t      pid   = this->process->pid;
[438]1255if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1256printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1257__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1258#endif
[172]1259
[408]1260    // allocate a process descriptor
1261    process = process_alloc();
1262    if( process == NULL )
1263    {
1264        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1265        __FUNCTION__, local_cxy ); 
1266        return -1;
1267    }
[1]1268
[408]1269    // allocate a child PID from local cluster
[416]1270    error = cluster_pid_alloc( process , &new_pid );
[428]1271    if( error ) 
[1]1272    {
[408]1273        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1274        __FUNCTION__, local_cxy ); 
1275        process_free( process );
1276        return -1;
[1]1277    }
[408]1278
[469]1279#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1280cycle = (uint32_t)hal_get_cycles();
1281if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1282printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
[583]1283__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1284#endif
1285
[408]1286    // initializes child process descriptor from parent process descriptor
1287    process_reference_init( process,
1288                            new_pid,
1289                            parent_process_xp );
1290
[438]1291#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1292cycle = (uint32_t)hal_get_cycles();
[438]1293if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1294printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1295__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1296#endif
[408]1297
[457]1298
[408]1299    // copy VMM from parent descriptor to child descriptor
1300    error = vmm_fork_copy( process,
1301                           parent_process_xp );
1302    if( error )
[101]1303    {
[408]1304        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1305        __FUNCTION__, local_cxy ); 
1306        process_free( process );
1307        cluster_pid_release( new_pid );
1308        return -1;
[101]1309    }
[172]1310
[438]1311#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1312cycle = (uint32_t)hal_get_cycles();
[438]1313if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1314printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1315__FUNCTION__, pid, trdid, cycle );
[433]1316#endif
[407]1317
[564]1318    // if parent_process is INIT, or if parent_process is the TXT owner,
1319    // the child_process becomes the owner of its TXT terminal
1320    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1321    {
1322        process_txt_set_ownership( XPTR( local_cxy , process ) );
1323
1324#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1325cycle = (uint32_t)hal_get_cycles();
1326if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1327printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1328__FUNCTION__ , pid, trdid, cycle );
[457]1329#endif
1330
1331    }
1332
[428]1333    // update extended pointer on .elf file
1334    process->vfs_bin_xp = vfs_bin_xp;
1335
[408]1336    // create child thread descriptor from parent thread descriptor
1337    error = thread_user_fork( parent_thread_xp,
1338                              process,
1339                              &thread );
1340    if( error )
1341    {
1342        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1343        __FUNCTION__, local_cxy ); 
1344        process_free( process );
1345        cluster_pid_release( new_pid );
1346        return -1;
1347    }
[172]1348
[564]1349// check main thread LTID
1350assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1351"main thread must have LTID == 0\n" );
[428]1352
[564]1353#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1354cycle = (uint32_t)hal_get_cycles();
[438]1355if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1356printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1357__FUNCTION__, pid, trdid, thread, cycle );
[433]1358#endif
[1]1359
[433]1360    // set Copy_On_Write flag in parent process GPT
[408]1361    // this includes all replicated GPT copies
1362    if( parent_process_cxy == local_cxy )   // reference is local
1363    {
1364        vmm_set_cow( parent_process_ptr );
1365    }
1366    else                                    // reference is remote
1367    {
1368        rpc_vmm_set_cow_client( parent_process_cxy,
1369                                parent_process_ptr );
1370    }
[1]1371
[433]1372    // set Copy_On_Write flag in child process GPT
1373    vmm_set_cow( process );
1374 
[438]1375#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1376cycle = (uint32_t)hal_get_cycles();
[438]1377if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1378printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1379__FUNCTION__, pid, trdid, cycle );
[433]1380#endif
[101]1381
[428]1382    // get extended pointers on parent children_root, children_lock and children_nr
1383    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1384    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1385    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1386
[428]1387    // register process in parent children list
[564]1388    remote_queuelock_acquire( children_lock_xp );
[428]1389        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1390        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1391    remote_queuelock_release( children_lock_xp );
[204]1392
[408]1393    // return success
1394    *child_thread = thread;
1395    *child_pid    = new_pid;
[1]1396
[438]1397#if DEBUG_PROCESS_MAKE_FORK
[433]1398cycle = (uint32_t)hal_get_cycles();
[438]1399if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1400printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1401__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1402#endif
[428]1403
[408]1404    return 0;
1405
[416]1406}   // end process_make_fork()
[408]1407
/////////////////////////////////////////////////////
// Implement the exec() system call for the calling process: open the
// new .elf file, delete all threads but the calling main thread, destroy
// and re-initialise the process VMM, load the new code/data segments,
// and restart the (recycled) main thread on the new entry point.
// On success this function does NOT return (thread_user_exec jumps to
// user code); on failure it returns -1.
// NOTE(review): on errors after vmm_destroy() the old VMM is NOT
// restored (see FIXME comments below) — the process is left unusable.
/////////////////////////////////////////////////////
error_t process_make_exec( exec_info_t  * exec_info )
{
    thread_t       * thread;                  // local pointer on this thread
    process_t      * process;                 // local pointer on this process
    pid_t            pid;                     // this process identifier
    xptr_t           ref_xp;                  // reference process for this process
	error_t          error;                   // value returned by called functions
    char           * path;                    // path to .elf file
    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
    uint32_t         file_id;                 // file index in fd_array
    uint32_t         args_nr;                 // number of main thread arguments
    char          ** args_pointers;           // array of pointers on main thread arguments

    // get thread, process, pid and ref_xp
    thread  = CURRENT_THREAD;
    process = thread->process;
    pid     = process->pid;
    ref_xp  = process->ref_xp;

	// get relevant infos from exec_info
	path          = exec_info->path;
    args_nr       = exec_info->args_nr;
    args_pointers = exec_info->args_pointers;

#if DEBUG_PROCESS_MAKE_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // open the file identified by <path>
    file_xp = XPTR_NULL;
    file_id = 0xFFFFFFFF;
	error   = vfs_open( process->vfs_root_xp,
                        path,
                        ref_xp,
                        O_RDONLY,
                        0,
                        &file_xp,
                        &file_id );
	if( error )
	{
		printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
		return -1;
	}

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // delete all threads other than this main thread in all clusters
    process_sigaction( pid , DELETE_ALL_THREADS );

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // reset local process VMM
    // (point of no return: the old address space is gone after this call)
    vmm_destroy( process );

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // re-initialize the VMM (kentry/args/envs vsegs registration)
    error = vmm_init( process );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM
        return -1;
    }
   
#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // register code & data vsegs as well as entry-point in process VMM,
    // and register extended pointer on .elf file in process descriptor
	error = elf_load_process( file_xp , process );
    if( error )
	{
		printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM
        return -1;
	}

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // update the existing main thread descriptor... and jump to user code
    error = thread_user_exec( (void *)process->vmm.entry_point,
                              args_nr,
                              args_pointers );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM
        return -1;
    }

    // thread_user_exec() never returns on success
    assert( false, "we should not execute this code");
 
	return 0;

}  // end process_make_exec()
1535
[457]1536
///////////////////////////////////////////////
// Initialise, at boot time, the local kernel "process_zero" descriptor:
// allocate the (always null) LPID from the local cluster, make the
// process its own parent / reference / owner, reset the th_tbl[] and
// children list, and register it in the cluster local list of processes.
///////////////////////////////////////////////
void process_zero_create( process_t * process )
{
    error_t error;
    pid_t   pid;

#if DEBUG_PROCESS_ZERO_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[%s] enter / cluster %x / cycle %d\n",
__FUNCTION__, local_cxy, cycle );
#endif

    // get PID from local cluster manager for this kernel process
    // (the kernel process must get LPID 0 in its cluster)
    error = cluster_pid_alloc( process , &pid );

    if( error || (LPID_FROM_PID( pid ) != 0) )
    {
        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
        __FUNCTION__ , local_cxy, pid );
        hal_core_sleep();
    }

    // initialize PID, REF_XP, PARENT_XP, and STATE
    // the kernel process_zero is its own parent_process,
    // reference_process, and owner_process, and cannot be killed...
    process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->owner_xp   = XPTR( local_cxy , process );
    process->parent_xp  = XPTR( local_cxy , process );
    process->term_state = 0;

    // reset th_tbl[] array and associated fields
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
	{
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );


    // reset children list as empty
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr = 0;
    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
                           LOCK_PROCESS_CHILDREN );

    // register kernel process in cluster manager local_list
    cluster_process_local_link( process );
   
    // make all initialisations visible to other cores before use
	hal_fence();

#if DEBUG_PROCESS_ZERO_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[%s] exit / cluster %x / cycle %d\n",
__FUNCTION__, local_cxy, cycle );
#endif

}  // end process_zero_create()
[428]1598
[564]1599////////////////////////////////
[485]1600void process_init_create( void )
[1]1601{
[428]1602    process_t      * process;       // local pointer on process descriptor
[409]1603    pid_t            pid;           // process_init identifier
1604    thread_t       * thread;        // local pointer on main thread
1605    pthread_attr_t   attr;          // main thread attributes
1606    lid_t            lid;           // selected core local index for main thread
[457]1607    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1608    uint32_t         file_id;       // file index in fd_array
[409]1609    error_t          error;
[1]1610
[438]1611#if DEBUG_PROCESS_INIT_CREATE
[610]1612thread_t * this = CURRENT_THREAD;
[433]1613uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1614if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1615printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1616__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1617#endif
[1]1618
[408]1619    // allocates memory for process descriptor from local cluster
1620        process = process_alloc(); 
[457]1621       
[564]1622// check memory allocator
1623assert( (process != NULL),
1624"no memory for process descriptor in cluster %x\n", local_cxy  );
[101]1625
[610]1626    // set the CWD and VFS_ROOT fields in process descriptor
1627    process->cwd_xp      = process_zero.vfs_root_xp;
1628    process->vfs_root_xp = process_zero.vfs_root_xp;
1629
[409]1630    // get PID from local cluster
[416]1631    error = cluster_pid_alloc( process , &pid );
[408]1632
[564]1633// check PID allocator
1634assert( (error == 0),
1635"cannot allocate PID in cluster %x\n", local_cxy );
[409]1636
[564]1637// check PID value
1638assert( (pid == 1) ,
1639"process INIT must be first process in cluster 0\n" );
[457]1640
[409]1641    // initialize process descriptor / parent is local process_zero
1642    process_reference_init( process,
[408]1643                            pid,
[457]1644                            XPTR( local_cxy , &process_zero ) ); 
[408]1645
[564]1646#if(DEBUG_PROCESS_INIT_CREATE & 1)
1647if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1648printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1649__FUNCTION__, this->process->pid, this->trdid );
[564]1650#endif
1651
[457]1652    // open the file identified by CONFIG_PROCESS_INIT_PATH
1653    file_xp = XPTR_NULL;
1654    file_id = -1;
[610]1655        error   = vfs_open( process->vfs_root_xp,
[457]1656                            CONFIG_PROCESS_INIT_PATH,
[610]1657                        XPTR( local_cxy , process ),
[457]1658                            O_RDONLY,
1659                            0,
1660                            &file_xp,
1661                            &file_id );
1662
[564]1663assert( (error == 0),
1664"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1665
[564]1666#if(DEBUG_PROCESS_INIT_CREATE & 1)
1667if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1668printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
1669__FUNCTION__, this->process->pid, this->trdid );
[564]1670#endif
1671
1672   // register "code" and "data" vsegs as well as entry-point
[409]1673    // in process VMM, using information contained in the elf file.
[457]1674        error = elf_load_process( file_xp , process );
[101]1675
[564]1676assert( (error == 0),
1677"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1678
[564]1679#if(DEBUG_PROCESS_INIT_CREATE & 1)
1680if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1681printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1682__FUNCTION__, this->process->pid, this->trdid );
[564]1683#endif
1684
[428]1685    // get extended pointers on process_zero children_root, children_lock
1686    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1687    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1688
[564]1689    // take lock protecting kernel process children list
1690    remote_queuelock_acquire( children_lock_xp );
1691
[428]1692    // register process INIT in parent local process_zero
1693        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1694        hal_atomic_add( &process_zero.children_nr , 1 );
1695
[564]1696    // release lock protecting kernel process children list
1697    remote_queuelock_release( children_lock_xp );
1698
1699#if(DEBUG_PROCESS_INIT_CREATE & 1)
1700if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1701printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1702__FUNCTION__, this->process->pid, this->trdid );
[564]1703#endif
1704
[409]1705    // select a core in local cluster to execute the main thread
1706    lid  = cluster_select_local_core();
1707
1708    // initialize pthread attributes for main thread
1709    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1710    attr.cxy        = local_cxy;
1711    attr.lid        = lid;
1712
1713    // create and initialize thread descriptor
1714        error = thread_user_create( pid,
1715                                (void *)process->vmm.entry_point,
1716                                NULL,
1717                                &attr,
1718                                &thread );
[1]1719
[564]1720assert( (error == 0),
1721"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[428]1722
[564]1723assert( (thread->trdid == 0),
1724"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1725
[564]1726#if(DEBUG_PROCESS_INIT_CREATE & 1)
1727if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1728printk("\n[%s] thread[%x,%x] created main thread\n",
1729__FUNCTION__, this->process->pid, this->trdid );
[564]1730#endif
1731
[409]1732    // activate thread
1733        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1734
[124]1735    hal_fence();
[1]1736
[438]1737#if DEBUG_PROCESS_INIT_CREATE
[433]1738cycle = (uint32_t)hal_get_cycles();
[438]1739if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1740printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1741__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1742#endif
[409]1743
[204]1744}  // end process_init_create()
1745
[428]1746/////////////////////////////////////////
1747void process_display( xptr_t process_xp )
1748{
1749    process_t   * process_ptr;
1750    cxy_t         process_cxy;
[443]1751
[428]1752    xptr_t        parent_xp;       // extended pointer on parent process
1753    process_t   * parent_ptr;
1754    cxy_t         parent_cxy;
1755
[443]1756    xptr_t        owner_xp;        // extended pointer on owner process
1757    process_t   * owner_ptr;
1758    cxy_t         owner_cxy;
1759
[428]1760    pid_t         pid;
1761    pid_t         ppid;
[580]1762    lpid_t        lpid;
[428]1763    uint32_t      state;
1764    uint32_t      th_nr;
1765
[443]1766    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1767    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1768    chdev_t     * txt_chdev_ptr;
1769    cxy_t         txt_chdev_cxy;
1770    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1771
1772    xptr_t        elf_file_xp;     // extended pointer on .elf file
1773    cxy_t         elf_file_cxy;
1774    vfs_file_t  * elf_file_ptr;
1775    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1776
1777    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1778    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1779
1780    // get cluster and local pointer on process
1781    process_ptr = GET_PTR( process_xp );
1782    process_cxy = GET_CXY( process_xp );
1783
[580]1784    // get process PID, LPID, and state
[564]1785    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1786    lpid  = LPID_FROM_PID( pid );
[564]1787    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1788
[580]1789    // get process PPID
[564]1790    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1791    parent_cxy = GET_CXY( parent_xp );
1792    parent_ptr = GET_PTR( parent_xp );
[564]1793    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1794
1795    // get number of threads
[564]1796    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1797
[443]1798    // get pointers on owner process descriptor
[564]1799    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1800    owner_cxy = GET_CXY( owner_xp );
1801    owner_ptr = GET_PTR( owner_xp );
[428]1802
[580]1803    // get process TXT name and .elf name
1804    if( lpid )                                   // user process
1805    {
[443]1806
[580]1807        // get extended pointer on file descriptor associated to TXT_RX
1808        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1809
[580]1810        assert( (txt_file_xp != XPTR_NULL) ,
1811        "process must be attached to one TXT terminal\n" ); 
[443]1812
[580]1813        // get TXT_RX chdev pointers
1814        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1815        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1816        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1817
1818        // get TXT_RX name and ownership
1819        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1820                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1821   
[580]1822        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1823                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]1824
[580]1825        // get process .elf name
1826        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1827        elf_file_cxy  = GET_CXY( elf_file_xp );
1828        elf_file_ptr  = GET_PTR( elf_file_xp );
1829        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1830        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1831    }
1832    else                                         // kernel process_zero
1833    {
1834        // TXT name and .elf name are not registered in kernel process_zero
1835        strcpy( txt_name , "txt0_rx" );
1836        txt_owner_xp = process_xp; 
1837        strcpy( elf_name , "kernel.elf" );
1838    }
1839
[428]1840    // display process info
[443]1841    if( txt_owner_xp == process_xp )
[428]1842    {
[581]1843        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
1844        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1845    }
1846    else
1847    {
[581]1848        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
1849        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1850    }
1851}  // end process_display()
1852
1853
1854////////////////////////////////////////////////////////////////////////////////////////
1855//     Terminals related functions
1856////////////////////////////////////////////////////////////////////////////////////////
1857
[581]1858//////////////////////////////////
[485]1859uint32_t process_txt_alloc( void )
[428]1860{
1861    uint32_t  index;       // TXT terminal index
1862    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1863    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1864    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1865    xptr_t    root_xp;     // extended pointer on owner field in chdev
1866
1867    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1868    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1869    {
1870        // get pointers on TXT_RX[index]
1871        chdev_xp  = chdev_dir.txt_rx[index];
1872        chdev_cxy = GET_CXY( chdev_xp );
1873        chdev_ptr = GET_PTR( chdev_xp );
1874
1875        // get extended pointer on root of attached process
1876        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1877
1878        // return free TXT index if found
1879        if( xlist_is_empty( root_xp ) ) return index; 
1880    }
1881
[492]1882    assert( false , "no free TXT terminal found" );
[428]1883
1884    return -1;
1885
1886} // end process_txt_alloc()
1887
1888/////////////////////////////////////////////
1889void process_txt_attach( process_t * process,
1890                         uint32_t    txt_id )
1891{
1892    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1893    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1894    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1895    xptr_t      root_xp;      // extended pointer on list root in chdev
1896    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1897
[564]1898// check process is in owner cluster
1899assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
1900"process descriptor not in owner cluster" );
[428]1901
[564]1902// check terminal index
1903assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1904"illegal TXT terminal index" );
[428]1905
1906    // get pointers on TXT_RX[txt_id] chdev
1907    chdev_xp  = chdev_dir.txt_rx[txt_id];
1908    chdev_cxy = GET_CXY( chdev_xp );
1909    chdev_ptr = GET_PTR( chdev_xp );
1910
1911    // get extended pointer on root & lock of attached process list
1912    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1913    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1914
[564]1915    // get lock protecting list of processes attached to TXT
1916    remote_busylock_acquire( lock_xp );
1917
[428]1918    // insert process in attached process list
1919    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1920
[564]1921    // release lock protecting list of processes attached to TXT
1922    remote_busylock_release( lock_xp );
1923
[446]1924#if DEBUG_PROCESS_TXT
[610]1925thread_t * this = CURRENT_THREAD;
[457]1926uint32_t cycle = (uint32_t)hal_get_cycles();
[446]1927if( DEBUG_PROCESS_TXT < cycle )
[610]1928printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
1929__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]1930#endif
[428]1931
1932} // end process_txt_attach()
1933
[436]1934/////////////////////////////////////////////
1935void process_txt_detach( xptr_t  process_xp )
[428]1936{
[436]1937    process_t * process_ptr;  // local pointer on process in owner cluster
1938    cxy_t       process_cxy;  // process owner cluster
1939    pid_t       process_pid;  // process identifier
1940    xptr_t      file_xp;      // extended pointer on stdin file
[428]1941    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1942    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1943    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1944    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1945
[436]1946    // get process cluster, local pointer, and PID
1947    process_cxy = GET_CXY( process_xp );
1948    process_ptr = GET_PTR( process_xp );
[564]1949    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1950
[564]1951// check process descriptor in owner cluster
1952assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
1953"process descriptor not in owner cluster" );
[436]1954
1955    // release TXT ownership (does nothing if not TXT owner)
1956    process_txt_transfer_ownership( process_xp );
[428]1957
[436]1958    // get extended pointer on process stdin file
[564]1959    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]1960
1961    // get pointers on TXT_RX chdev
1962    chdev_xp  = chdev_from_file( file_xp );
[428]1963    chdev_cxy = GET_CXY( chdev_xp );
1964    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1965
[436]1966    // get extended pointer on lock protecting attached process list
[428]1967    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1968
[564]1969    // get lock protecting list of processes attached to TXT
1970    remote_busylock_acquire( lock_xp );
1971
[428]1972    // unlink process from attached process list
[436]1973    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
1974
[564]1975    // release lock protecting list of processes attached to TXT
1976    remote_busylock_release( lock_xp );
1977
[446]1978#if DEBUG_PROCESS_TXT
[610]1979thread_t * this = CURRENT_THREAD;
[457]1980uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1981uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]1982if( DEBUG_PROCESS_TXT < cycle )
[610]1983printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
1984__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]1985#endif
[428]1986
1987} // end process_txt_detach()
1988
1989///////////////////////////////////////////////////
1990void process_txt_set_ownership( xptr_t process_xp )
1991{
1992    process_t * process_ptr;
1993    cxy_t       process_cxy;
[436]1994    pid_t       process_pid;
[428]1995    xptr_t      file_xp;
1996    xptr_t      txt_xp;     
1997    chdev_t   * txt_ptr;
1998    cxy_t       txt_cxy;
1999
[436]2000    // get pointers on process in owner cluster
[428]2001    process_cxy = GET_CXY( process_xp );
[435]2002    process_ptr = GET_PTR( process_xp );
[564]2003    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2004
2005    // check owner cluster
[492]2006    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2007    "process descriptor not in owner cluster\n" );
2008
[428]2009    // get extended pointer on stdin pseudo file
[564]2010    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2011
2012    // get pointers on TXT chdev
2013    txt_xp  = chdev_from_file( file_xp );
2014    txt_cxy = GET_CXY( txt_xp );
[435]2015    txt_ptr = GET_PTR( txt_xp );
[428]2016
2017    // set owner field in TXT chdev
[564]2018    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2019
[446]2020#if DEBUG_PROCESS_TXT
[610]2021thread_t * this = CURRENT_THREAD;
[457]2022uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2023uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2024if( DEBUG_PROCESS_TXT < cycle )
[610]2025printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
2026__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2027#endif
2028
[428]2029}  // end process_txt_set ownership()
2030
[436]2031////////////////////////////////////////////////////////
2032void process_txt_transfer_ownership( xptr_t process_xp )
[428]2033{
[436]2034    process_t * process_ptr;     // local pointer on process releasing ownership
2035    cxy_t       process_cxy;     // process cluster
2036    pid_t       process_pid;     // process identifier
[428]2037    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2038    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2039    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2040    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2041    uint32_t    txt_id;          // TXT_RX channel
[428]2042    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2043    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2044    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2045    xptr_t      iter_xp;         // iterator for xlist
2046    xptr_t      current_xp;      // extended pointer on current process
[433]2047    process_t * current_ptr;     // local pointer on current process
2048    cxy_t       current_cxy;     // cluster for current process
[428]2049
[457]2050#if DEBUG_PROCESS_TXT
[610]2051thread_t * this  = CURRENT_THREAD;
2052uint32_t   cycle;
[457]2053#endif
2054
[436]2055    // get pointers on process in owner cluster
[428]2056    process_cxy = GET_CXY( process_xp );
[435]2057    process_ptr = GET_PTR( process_xp );
[564]2058    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2059
2060    // check owner cluster
[492]2061    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2062    "process descriptor not in owner cluster\n" );
2063
[428]2064    // get extended pointer on stdin pseudo file
[564]2065    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2066
2067    // get pointers on TXT chdev
2068    txt_xp  = chdev_from_file( file_xp );
2069    txt_cxy = GET_CXY( txt_xp );
[433]2070    txt_ptr = GET_PTR( txt_xp );
[428]2071
[433]2072    // get extended pointer on TXT_RX owner and TXT channel
[564]2073    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2074    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2075
[436]2076    // transfer ownership only if process is the TXT owner
2077    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2078    {
[436]2079        // get extended pointers on root and lock of attached processes list
2080        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2081        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2082
[436]2083        // get lock
[564]2084        remote_busylock_acquire( lock_xp );
[436]2085
2086        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]2087        {
[436]2088            // scan attached process list to find KSH process
2089            XLIST_FOREACH( root_xp , iter_xp )
2090            {
2091                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2092                current_cxy = GET_CXY( current_xp );
2093                current_ptr = GET_PTR( current_xp );
[435]2094
[436]2095                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2096                {
2097                    // release lock
[564]2098                    remote_busylock_release( lock_xp );
[436]2099
2100                    // set owner field in TXT chdev
[564]2101                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2102
[446]2103#if DEBUG_PROCESS_TXT
[610]2104cycle = (uint32_t)hal_get_cycles();
[564]2105uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2106if( DEBUG_PROCESS_TXT < cycle )
[610]2107printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
2108__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
[457]2109process_txt_display( txt_id );
[436]2110#endif
2111                     return;
2112                }
2113            }
2114 
2115            // release lock
[564]2116            remote_busylock_release( lock_xp );
[436]2117
2118            // PANIC if KSH not found
[492]2119            assert( false , "KSH process not found for TXT %d" );
[436]2120
2121            return;
2122        }
2123        else                                               // process is KSH
2124        {
2125            // scan attached process list to find another process
2126            XLIST_FOREACH( root_xp , iter_xp )
[428]2127            {
[436]2128                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2129                current_cxy = GET_CXY( current_xp );
2130                current_ptr = GET_PTR( current_xp );
2131
2132                if( current_xp != process_xp )            // current is not KSH
2133                {
2134                    // release lock
[564]2135                    remote_busylock_release( lock_xp );
[436]2136
2137                    // set owner field in TXT chdev
[564]2138                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2139
[446]2140#if DEBUG_PROCESS_TXT
[610]2141cycle  = (uint32_t)hal_get_cycles();
[564]2142uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2143if( DEBUG_PROCESS_TXT < cycle )
[610]2144printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
2145__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[457]2146process_txt_display( txt_id );
[436]2147#endif
2148                     return;
2149                }
[428]2150            }
[436]2151
2152            // release lock
[564]2153            remote_busylock_release( lock_xp );
[436]2154
2155            // no more owner for TXT if no other process found
[564]2156            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2157
[446]2158#if DEBUG_PROCESS_TXT
[436]2159cycle = (uint32_t)hal_get_cycles();
[446]2160if( DEBUG_PROCESS_TXT < cycle )
[610]2161printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
2162__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[457]2163process_txt_display( txt_id );
[436]2164#endif
2165            return;
[428]2166        }
[436]2167    }
2168    else
2169    {
[433]2170
[446]2171#if DEBUG_PROCESS_TXT
[436]2172cycle = (uint32_t)hal_get_cycles();
[446]2173if( DEBUG_PROCESS_TXT < cycle )
[593]2174printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n",
[610]2175__FUNCTION__, this->trdid, process_pid, cycle );
[457]2176process_txt_display( txt_id );
[436]2177#endif
2178
[428]2179    }
[436]2180}  // end process_txt_transfer_ownership()
[428]2181
2182
[564]2183////////////////////////////////////////////////
2184bool_t process_txt_is_owner( xptr_t process_xp )
[457]2185{
2186    // get local pointer and cluster of process in owner cluster
2187    cxy_t       process_cxy = GET_CXY( process_xp );
2188    process_t * process_ptr = GET_PTR( process_xp );
2189
[564]2190// check calling thread execute in target process owner cluster
2191pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2192assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2193"process descriptor not in owner cluster\n" );
[457]2194
2195    // get extended pointer on stdin pseudo file
[564]2196    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2197
2198    // get pointers on TXT chdev
2199    xptr_t    txt_xp  = chdev_from_file( file_xp );
2200    cxy_t     txt_cxy = GET_CXY( txt_xp );
2201    chdev_t * txt_ptr = GET_PTR( txt_xp );
2202
2203    // get extended pointer on TXT_RX owner process
[564]2204    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2205
2206    return (process_xp == owner_xp);
2207
2208}   // end process_txt_is_owner()
2209
[436]2210////////////////////////////////////////////////     
2211xptr_t process_txt_get_owner( uint32_t channel )
[435]2212{
2213    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2214    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2215    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2216
[564]2217    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2218
[457]2219}  // end process_txt_get_owner()
2220
[435]2221///////////////////////////////////////////
2222void process_txt_display( uint32_t txt_id )
2223{
2224    xptr_t      chdev_xp;
2225    cxy_t       chdev_cxy;
2226    chdev_t   * chdev_ptr;
2227    xptr_t      root_xp;
2228    xptr_t      lock_xp;
2229    xptr_t      current_xp;
2230    xptr_t      iter_xp;
[443]2231    cxy_t       txt0_cxy;
2232    chdev_t   * txt0_ptr;
2233    xptr_t      txt0_xp;
2234    xptr_t      txt0_lock_xp;
2235   
[435]2236    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2237    "illegal TXT terminal index" );
[435]2238
[443]2239    // get pointers on TXT0 chdev
2240    txt0_xp  = chdev_dir.txt_tx[0];
2241    txt0_cxy = GET_CXY( txt0_xp );
2242    txt0_ptr = GET_PTR( txt0_xp );
2243
2244    // get extended pointer on TXT0 lock
2245    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2246
[435]2247    // get pointers on TXT_RX[txt_id] chdev
2248    chdev_xp  = chdev_dir.txt_rx[txt_id];
2249    chdev_cxy = GET_CXY( chdev_xp );
2250    chdev_ptr = GET_PTR( chdev_xp );
2251
2252    // get extended pointer on root & lock of attached process list
2253    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2254    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2255
[443]2256    // get lock on attached process list
[564]2257    remote_busylock_acquire( lock_xp );
[443]2258
2259    // get TXT0 lock in busy waiting mode
[564]2260    remote_busylock_acquire( txt0_lock_xp );
[443]2261
[435]2262    // display header
[443]2263    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2264    txt_id , (uint32_t)hal_get_cycles() );
[435]2265
[436]2266    // scan attached process list
[435]2267    XLIST_FOREACH( root_xp , iter_xp )
2268    {
2269        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2270        process_display( current_xp );
2271    }
2272
[443]2273    // release TXT0 lock in busy waiting mode
[564]2274    remote_busylock_release( txt0_lock_xp );
[443]2275
2276    // release lock on attached process list
[564]2277    remote_busylock_release( lock_xp );
[435]2278
2279}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.