source: trunk/kernel/kern/process.c @ 624

Last change on this file since 624 was 624, checked in by alain, 5 years ago

Fix several bugs to use the instruction MMU in kernel mode
in replacement of the instruction address extension register,
and remove the "kentry" segment.

This version is running on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor-Interrupts.

File size: 79.4 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[1]43#include <list.h>
[407]44#include <string.h>
[1]45#include <scheduler.h>
[564]46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
[1]51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
[23]57#include <syscalls.h>
[435]58#include <shared_syscalls.h>
[1]59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
[428]64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
[583]71/////////////////////////////////
[503]72process_t * process_alloc( void )
[1]73{
74        kmem_req_t   req;
75
76    req.type  = KMEM_PROCESS;
77        req.size  = sizeof(process_t);
78        req.flags = AF_KERNEL;
79
80    return (process_t *)kmem_alloc( &req );
81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_PROCESS;
89        req.ptr  = process;
90        kmem_free( &req );
91}
92
//////////////////////////////////////////////////////////////////////////////////////////
// This function initializes, in the local cluster, the reference (owner) process
// descriptor identified by the <pid> and <parent_xp> arguments:
// - pid / ref_xp / owner_xp / parent_xp / term_state fields,
// - VFS root and CWD inodes inherited from the parent process,
// - an empty VMM and an empty fd_array,
// - the stdin/stdout/stderr pseudo-files:
//   . for the INIT process (pid == 1) or a KSH process (parent pid == 1), it
//     attaches the process to a TXT terminal and opens the three pseudo-files
//     on the txt_rx / txt_tx chdevs (indexes must be 0 / 1 / 2),
//   . for a normal user process, it attaches the child to the parent TXT
//     terminal and copies the complete parent fd_array,
// - the children / semaphore / mutex / barrier / condvar / dir lists and locks,
// - the th_tbl[] array of local threads (all slots reset),
// and finally registers the new process in the local cluster manager:
// pref_tbl[lpid], local_list, and copies_list.
// It panics (assert) if the VMM cannot be initialized or a pseudo-file
// cannot be opened.
//////////////////////////////////////////////////////////////////////////////////////////
void process_reference_init( process_t * process,
                             pid_t       pid,
                             xptr_t      parent_xp )
{
    xptr_t      process_xp;
    cxy_t       parent_cxy;
    process_t * parent_ptr;
    xptr_t      stdin_xp;
    xptr_t      stdout_xp;
    xptr_t      stderr_xp;
    uint32_t    stdin_id;
    uint32_t    stdout_id;
    uint32_t    stderr_id;
    error_t     error;
    uint32_t    txt_id;
    char        rx_path[40];
    char        tx_path[40];
    xptr_t      file_xp;
    xptr_t      chdev_xp;
    chdev_t *   chdev_ptr;
    cxy_t       chdev_cxy;
    pid_t       parent_pid;

    // build extended pointer on this reference process
    process_xp = XPTR( local_cxy , process );

    // get parent process cluster and local pointer
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );

    // get parent_pid (remote read: parent descriptor can be in another cluster)
    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

#if DEBUG_PROCESS_REFERENCE_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter to initalialize process %x / cycle %d\n",
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
    // (this descriptor is both the reference and the owner copy)
        process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->owner_xp   = XPTR( local_cxy , process );
    process->parent_xp  = parent_xp;
    process->term_state = 0;

    // initialize VFS root inode and CWD inode (inherited from parent)
    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );

    // initialize vmm as empty
    error = vmm_init( process );

assert( (error == 0) , "cannot initialize VMM\n" );
 
#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    // initialize fd_array as empty
    process_fd_init( process );

    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
    {
        // select a TXT channel: INIT always uses TXT0,
        // each KSH gets a dedicated terminal from the TXT allocator
        if( pid == 1 )  txt_id = 0;                     // INIT
        else            txt_id = process_txt_alloc();   // KSH

        // attach process to TXT
        process_txt_attach( process , txt_id ); 

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
#endif
        // build path to TXT_RX[i] and TXT_TX[i] chdevs
        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );

        // create stdin pseudo file (must get fd_array index 0)
        error = vfs_open(  process->vfs_root_xp,
                           rx_path,
                           process_xp,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

assert( (error == 0) , "cannot open stdin pseudo file" );
assert( (stdin_id == 0) , "stdin index must be 0" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

        // create stdout pseudo file (must get fd_array index 1)
        error = vfs_open(  process->vfs_root_xp,
                           tx_path,
                           process_xp,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        assert( (error == 0) , "cannot open stdout pseudo file" );
        assert( (stdout_id == 1) , "stdout index must be 1" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

        // create stderr pseudo file (must get fd_array index 2)
        // NOTE: stderr is opened on the same TX path as stdout
        error = vfs_open(  process->vfs_root_xp,
                           tx_path,
                           process_xp,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );

        assert( (error == 0) , "cannot open stderr pseudo file" );
        assert( (stderr_id == 2) , "stderr index must be 2" );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

    }
    else                                            // normal user process
    {
        // get extended pointer on stdin pseudo file in parent process
        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );

        // get extended pointer on parent process TXT chdev
        chdev_xp = chdev_from_file( file_xp );
 
        // get cluster and local pointer on chdev
        chdev_cxy = GET_CXY( chdev_xp );
        chdev_ptr = GET_PTR( chdev_xp );
 
        // get parent process TXT terminal index
        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );

        // attach child process to parent process TXT terminal
        process_txt_attach( process , txt_id ); 

        // copy all open files from parent process fd_array to this process
        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
    }

    // initialize lock protecting CWD changes
    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid , cycle );
#endif

    // reset children list root
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr     = 0;
    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );

    // reset semaphore / mutex / barrier / condvar list roots and lock
    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );

    // reset open directories root and lock
    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );

    // register new process in the local cluster manager pref_tbl[]
    lpid_t lpid = LPID_FROM_PID( pid );
    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( process );

    // register new process descriptor in local cluster manager copies_list
    cluster_process_copies_link( process );

    // initialize th_tbl[] array and associated threads
    uint32_t i;

    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
        {
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );

    // make all initializations visible to other cores before returning
        hal_fence();

#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
__FUNCTION__, parent_pid, this->trdid, pid, cycle );
#endif

}  // process_reference_init()
[204]318
//////////////////////////////////////////////////////////////////////////////////////////
// This function initializes a local copy of a process descriptor, from the
// reference process descriptor identified by the <reference_process_xp>
// extended pointer:
// - pid / parent_xp / vfs_root_xp / vfs_bin_xp fields are copied (remote reads)
//   from the reference process,
// - ref_xp and owner_xp both point on the reference process,
// - the VMM and fd_array are reset as empty,
// - cwd_xp is reset to XPTR_NULL (CWD is only managed in the reference process),
// - children / semaphore / mutex / barrier / condvar lists are reset
//   (not used in a process descriptor copy),
// - the th_tbl[] array of local threads is reset,
// and the copy is registered in the local cluster manager local_list and in
// the owner cluster manager copies_list.
// It returns 0 on success (it panics on VMM initialization failure).
//////////////////////////////////////////////////////////////////////////////////////////
error_t process_copy_init( process_t * local_process,
                           xptr_t      reference_process_xp )
{
    error_t error;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    process_t * ref_ptr = GET_PTR( reference_process_xp );

    // initialize PID, REF_XP, PARENT_XP, and STATE
    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    local_process->ref_xp     = reference_process_xp;
    local_process->owner_xp   = reference_process_xp;
    local_process->term_state = 0;

#if DEBUG_PROCESS_COPY_INIT
thread_t * this = CURRENT_THREAD; 
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
#endif

// check user process (process_zero / kernel process has PID 0)
assert( (local_process->pid != 0), "PID cannot be 0" );

    // reset local process vmm
    error = vmm_init( local_process );
    assert( (error == 0) , "cannot initialize VMM\n");

    // reset process file descriptors array
        process_fd_init( local_process );

    // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    local_process->cwd_xp      = XPTR_NULL;

    // reset children list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    local_process->children_nr   = 0;
    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
                           LOCK_PROCESS_CHILDREN );

    // reset children_list (not used in a process descriptor copy)
    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );

    // reset semaphore / mutex / barrier / condvar list roots
    // (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );

    // initialize th_tbl[] array and associated fields
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
        {
        local_process->th_tbl[i] = NULL;
    }
    local_process->th_nr  = 0;
    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );


    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( local_process );

    // register new process descriptor in owner cluster manager copies_list
    cluster_process_copies_link( local_process );

    // make all initializations visible to other cores before returning
        hal_fence();

#if DEBUG_PROCESS_COPY_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_COPY_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
#endif

    return 0;

} // end process_copy_init()
402
//////////////////////////////////////////////////////////////////////////////////////////
// This function destroys a process descriptor copy in the local cluster.
// The caller must guarantee that the descriptor does not contain any thread
// anymore (checked by the assert below). It:
// - destroys the VMM,
// - unlinks the descriptor from the local cluster manager local_list and from
//   the owner cluster manager copies_list,
// - if the local cluster is the process owner, it also removes the process
//   from its parent children_list (under the parent children_lock, with an
//   atomic decrement of the parent children_nr) and releases the PID to the
//   cluster manager,
// - finally releases the descriptor memory.
// Open files / vfs_bin_xp refcount / dirty-file synchronization are NOT yet
// handled here (see the FIXME comments below).
//////////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;
    xptr_t      children_nr_xp;

    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from children_list
    // and release PID if owner cluster
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointers on children_lock and children_nr in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list, under the parent lock
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
506
[583]507///////////////////////////////////////////////////////////////////
[527]508const char * process_action_str( process_sigactions_t action_type )
[409]509{
[583]510    switch ( action_type )
511    {
512        case BLOCK_ALL_THREADS:   return "BLOCK";
513        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
514        case DELETE_ALL_THREADS:  return "DELETE";
515        default:                  return "undefined";
516    }
[409]517}
518
//////////////////////////////////////////////////////////////////////////////////////////
// This function applies a given action (BLOCK / UNBLOCK / DELETE all threads)
// to all copies of the process identified by <pid>, in all clusters:
// - it sends one parallel, non-blocking RPC_PROCESS_SIGACTION to each remote
//   cluster containing a process copy, using a single RPC descriptor allocated
//   in the client stack (shared, because there are no output arguments),
// - the client thread blocks itself and deschedules until the last RPC server
//   thread unblocks it (or unblocks itself if there is no remote copy),
// - the local process copy, if any, is then handled directly by calling
//   process_delete_threads() / process_block_threads() /
//   process_unblock_threads().
// IRQs are masked while the RPCs are being posted.
// The calling thread must be able to yield (checked below).
//////////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread sends parallel RPCs to all remote clusters containing
    // target process copies, waits for all responses, and then handles directly
    // the threads in the local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field.

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself (must be done BEFORE the RPCs are posted,
    // so the last server response cannot be lost)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
672
//////////////////////////////////////////////////////////////////////////////////////////
// This function blocks all threads of a given user process contained in the
// local cluster: it sets the THREAD_BLOCKED_GLOBAL bit in each thread
// descriptor registered in the local th_tbl[].
// When a target thread runs on another core than the calling thread, the
// function requests an acknowledge from the target scheduler (FLAG_REQ_ACK +
// IPI) to make sure the target thread is actually descheduled, and polls the
// shared ack counter until all acknowledges have been received.
// The target process must be an user process (LPID != 0, checked below).
//////////////////////////////////////////////////////////////////////////////////////////
void process_block_threads( process_t * process )
{
    thread_t          * target;         // pointer on target thread
    thread_t          * this;           // pointer on calling thread
    uint32_t            ltid;           // index in process th_tbl[]
    cxy_t               owner_cxy;      // target process owner cluster
    uint32_t            count;          // requests counter
    volatile uint32_t   ack_count;      // acknowledges counter (written by schedulers)

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
assert( (LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get target process owner cluster
    // NOTE(review): owner_cxy is computed but appears unused below — confirm
    owner_cxy = CXY_FROM_PID( process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowledge the blocking
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from scheduler.
           
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exist
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
 
            if( this->core->lid != target->core->lid )
            {
                // increment responses counter
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait other threads acknowledges  TODO this could be improved...
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()
[409]758
/////////////////////////////////////////////////
// Mark for deletion all local threads of a target user process, but the
// process main thread (ltid 0 in the owner cluster) and the client thread.
// Each selected thread is marked and blocked by thread_delete() (not forced).
// - process   : local pointer on the target process descriptor.
// - client_xp : extended pointer on the client thread (must not be deleted).
void process_delete_threads( process_t * process,
                             xptr_t      client_xp )
{
    thread_t          * this;          // pointer on calling thread
    thread_t          * target;        // local pointer on target thread
    xptr_t              target_xp;     // extended pointer on target thread
    cxy_t               owner_cxy;     // owner process cluster
    uint32_t            ltid;          // index in process th_tbl
    uint32_t            count;         // threads counter

    // get calling thread pointer
    this = CURRENT_THREAD;

    // get target process owner cluster
    owner_cxy = CXY_FROM_PID( process->pid );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
#endif

// check target process is an user process (kernel process has LPID 0)
assert( (LPID_FROM_PID( process->pid ) != 0),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_wr_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )    // valid thread
        {
            count++;
            target_xp = XPTR( local_cxy , target );

            // main thread and client thread should not be deleted
            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
                (client_xp) != target_xp )                           // not client thread
            {
                // mark target thread for delete and block it
                thread_delete( target_xp , process->pid , false );   // not forced
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_wr_release( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end process_delete_threads()
[409]822
///////////////////////////////////////////////////
// Unblock all local threads of a target user process, by resetting the
// THREAD_BLOCKED_GLOBAL bit in each registered thread descriptor.
// - process : local pointer on the target process descriptor.
void process_unblock_threads( process_t * process )
{
    thread_t          * target;        // pointer on target thread
    thread_t          * this;          // pointer on calling thread
    uint32_t            ltid;          // index in process th_tbl
    uint32_t            count;         // requests counter

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process (kernel process has LPID 0)
assert( ( LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on process threads to unblock all threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread found
        {
            count++;

            // reset the global blocked bit in target thread descriptor.
            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_unblock_threads()
[407]875
[1]876///////////////////////////////////////////////
877process_t * process_get_local_copy( pid_t pid )
878{
879    error_t        error;
[172]880    process_t    * process_ptr;   // local pointer on process
[23]881    xptr_t         process_xp;    // extended pointer on process
[1]882
883    cluster_t * cluster = LOCAL_CLUSTER;
884
[564]885#if DEBUG_PROCESS_GET_LOCAL_COPY
886thread_t * this = CURRENT_THREAD;
887uint32_t cycle = (uint32_t)hal_get_cycles();
888if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]889printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]890__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]891#endif
892
[1]893    // get lock protecting local list of processes
[564]894    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]895
896    // scan the local list of process descriptors to find the process
[23]897    xptr_t  iter;
898    bool_t  found = false;
899    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]900    {
[23]901        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]902        process_ptr = GET_PTR( process_xp );
[23]903        if( process_ptr->pid == pid )
[1]904        {
905            found = true;
906            break;
907        }
908    }
909
910    // release lock protecting local list of processes
[564]911    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]912
[172]913    // allocate memory for a new local process descriptor
[440]914    // and initialise it from reference cluster if not found
[1]915    if( !found )
916    {
917        // get extended pointer on reference process descriptor
[23]918        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]919
[492]920        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]921
[1]922        // allocate memory for local process descriptor
[23]923        process_ptr = process_alloc();
[443]924
[23]925        if( process_ptr == NULL )  return NULL;
[1]926
927        // initialize local process descriptor copy
[23]928        error = process_copy_init( process_ptr , ref_xp );
[443]929
[1]930        if( error ) return NULL;
931    }
932
[440]933#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]934cycle = (uint32_t)hal_get_cycles();
[440]935if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]936printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]937__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]938#endif
939
[23]940    return process_ptr;
[1]941
[409]942}  // end process_get_local_copy()
943
[436]944////////////////////////////////////////////
945pid_t process_get_ppid( xptr_t  process_xp )
946{
947    cxy_t       process_cxy;
948    process_t * process_ptr;
949    xptr_t      parent_xp;
950    cxy_t       parent_cxy;
951    process_t * parent_ptr;
952
953    // get process cluster and local pointer
954    process_cxy = GET_CXY( process_xp );
955    process_ptr = GET_PTR( process_xp );
956
957    // get pointers on parent process
[564]958    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]959    parent_cxy = GET_CXY( parent_xp );
960    parent_ptr = GET_PTR( parent_xp );
961
[564]962    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]963}
964
[1]965//////////////////////////////////////////////////////////////////////////////////////////
966// File descriptor array related functions
967//////////////////////////////////////////////////////////////////////////////////////////
968
969///////////////////////////////////////////
970void process_fd_init( process_t * process )
971{
972    uint32_t fd;
973
[610]974    // initialize lock
[564]975    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]976
[610]977    // initialize number of open files
[23]978    process->fd_array.current = 0;
979
[1]980    // initialize array
[23]981    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]982    {
983        process->fd_array.array[fd] = XPTR_NULL;
984    }
985}
////////////////////////////////////////////////////
// Allocate a free slot in the fd_array of the reference process
// identified by <process_xp>, register the <file_xp> file descriptor
// in this slot, and return the allocated index in <fdid>.
// The calling thread can be running in any cluster (remote accesses).
// Returns 0 on success / returns -1 if the fd_array is full.
error_t process_fd_register( xptr_t      process_xp,
                             xptr_t      file_xp,
                             uint32_t  * fdid )
{
    bool_t    found;
    uint32_t  id;
    xptr_t    xp;

    // get reference process cluster and local pointer
    process_t * process_ptr = GET_PTR( process_xp );
    cxy_t       process_cxy = GET_CXY( process_xp );

// check client process is reference process
assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
"client process must be reference process\n" );

#if DEBUG_PROCESS_FD_REGISTER
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
if( DEBUG_PROCESS_FD_REGISTER < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
#endif

    // build extended pointer on lock protecting reference fd_array
    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );

    // take lock protecting reference fd_array
    remote_queuelock_acquire( lock_xp );

    found   = false;

    // scan the reference fd_array for the first free slot
    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
    {
        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
        if ( xp == XPTR_NULL )
        {
            // update reference fd_array
            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
            hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );

            // exit
            *fdid = id;
            found = true;
            break;
        }
    }

    // release lock protecting fd_array
    remote_queuelock_release( lock_xp );

#if DEBUG_PROCESS_FD_REGISTER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_FD_REGISTER < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
#endif

    if ( !found ) return -1;
    else          return 0;

}  // end process_fd_register()
1050
////////////////////////////////////////////////
// Return the extended pointer on the file descriptor identified by <fdid>
// in the <process> fd_array. The local copy is checked first; on a miss,
// the reference process fd_array is accessed (with its lock taken), and
// the local copy is updated if the entry is found there.
// Returns XPTR_NULL when no file is registered for this fdid.
xptr_t process_fd_get_xptr( process_t * process,
                            uint32_t    fdid )
{
    xptr_t  file_xp;
    xptr_t  lock_xp;

    // access local copy of process descriptor
    file_xp = process->fd_array.array[fdid];

    if( file_xp == XPTR_NULL )
    {
        // get reference process cluster and local pointer
        xptr_t      ref_xp  = process->ref_xp;
        cxy_t       ref_cxy = GET_CXY( ref_xp );
        process_t * ref_ptr = GET_PTR( ref_xp );

        // build extended pointer on lock protecting reference fd_array
        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );

        // take lock protecting reference fd_array
        remote_queuelock_acquire( lock_xp );

        // access reference process descriptor
        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );

        // update local fd_array if found
        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;

        // release lock protecting reference fd_array
        remote_queuelock_release( lock_xp );
    }

    return file_xp;

}  // end process_fd_get_xptr()
1087
///////////////////////////////////////////
// Copy all valid entries of the <src_xp> fd_array to the <dst_xp> fd_array,
// incrementing the refcount of each copied file descriptor.
// Both fd_arrays can be located in any cluster (remote accesses).
// NOTE(review): only the source lock is taken, and the destination
// "current" counter is not updated here — presumably the destination
// is not yet visible to other threads; verify against callers.
void process_fd_remote_copy( xptr_t dst_xp,
                             xptr_t src_xp )
{
    uint32_t fd;
    xptr_t   entry;

    // get cluster and local pointer for src fd_array
    cxy_t        src_cxy = GET_CXY( src_xp );
    fd_array_t * src_ptr = GET_PTR( src_xp );

    // get cluster and local pointer for dst fd_array
    cxy_t        dst_cxy = GET_CXY( dst_xp );
    fd_array_t * dst_ptr = GET_PTR( dst_xp );

    // get the remote lock protecting the src fd_array
    remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );

    // loop on all fd_array entries
    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    {
        entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );

        if( entry != XPTR_NULL )
        {
            // increment file descriptor refcount
            vfs_file_count_up( entry );

            // copy entry in destination process fd_array
            hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
        }
    }

    // release lock on source process fd_array
    remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );

}  // end process_fd_remote_copy()
1125
[564]1126
1127////////////////////////////////////
1128bool_t process_fd_array_full( void )
1129{
1130    // get extended pointer on reference process
1131    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1132
1133    // get reference process cluster and local pointer
1134    process_t * ref_ptr = GET_PTR( ref_xp );
1135    cxy_t       ref_cxy = GET_CXY( ref_xp );
1136
1137    // get number of open file descriptors from reference fd_array
1138    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1139
1140        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1141}
1142
1143
[1]1144////////////////////////////////////////////////////////////////////////////////////
1145//  Thread related functions
1146////////////////////////////////////////////////////////////////////////////////////
1147
1148/////////////////////////////////////////////////////
1149error_t process_register_thread( process_t * process,
1150                                 thread_t  * thread,
1151                                 trdid_t   * trdid )
1152{
[472]1153    ltid_t         ltid;
1154    bool_t         found = false;
1155 
[564]1156// check arguments
1157assert( (process != NULL) , "process argument is NULL" );
1158assert( (thread != NULL) , "thread argument is NULL" );
[1]1159
[564]1160    // get the lock protecting th_tbl for all threads
1161    // but the idle thread executing kernel_init (cannot yield)
1162    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1163
[583]1164    // scan th_tbl
[564]1165    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1166    {
1167        if( process->th_tbl[ltid] == NULL )
1168        {
1169            found = true;
1170            break;
1171        }
1172    }
1173
1174    if( found )
1175    {
1176        // register thread in th_tbl[]
1177        process->th_tbl[ltid] = thread;
1178        process->th_nr++;
1179
1180        // returns trdid
1181        *trdid = TRDID( local_cxy , ltid );
1182    }
1183
[583]1184    // release the lock protecting th_tbl
[564]1185    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1186
[564]1187    return (found) ? 0 : 0xFFFFFFFF;
[204]1188
1189}  // end process_register_thread()
1190
[443]1191/////////////////////////////////////////////////
1192bool_t process_remove_thread( thread_t * thread )
[1]1193{
[443]1194    uint32_t count;  // number of threads in local process descriptor
1195
[1]1196    process_t * process = thread->process;
1197
1198    // get thread local index
1199    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1200   
1201    // get the lock protecting th_tbl[]
1202    rwlock_wr_acquire( &process->th_lock );
[428]1203
[583]1204    // get number of threads
[443]1205    count = process->th_nr;
[428]1206
[583]1207// check thread
1208assert( (thread != NULL) , "thread argument is NULL" );
1209
[564]1210// check th_nr value
[624]1211assert( (count > 0) , "process th_nr cannot be 0" );
[443]1212
[1]1213    // remove thread from th_tbl[]
1214    process->th_tbl[ltid] = NULL;
[450]1215    process->th_nr = count-1;
[1]1216
[583]1217    // release lock protecting th_tbl
[564]1218    rwlock_wr_release( &process->th_lock );
[428]1219
[443]1220    return (count == 1);
1221
[450]1222}  // end process_remove_thread()
[204]1223
[408]1224/////////////////////////////////////////////////////////
1225error_t process_make_fork( xptr_t      parent_process_xp,
1226                           xptr_t      parent_thread_xp,
1227                           pid_t     * child_pid,
1228                           thread_t ** child_thread )
[1]1229{
[408]1230    process_t * process;         // local pointer on child process descriptor
1231    thread_t  * thread;          // local pointer on child thread descriptor
1232    pid_t       new_pid;         // process identifier for child process
1233    pid_t       parent_pid;      // process identifier for parent process
1234    xptr_t      ref_xp;          // extended pointer on reference process
[428]1235    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1236    error_t     error;
[1]1237
[408]1238    // get cluster and local pointer for parent process
1239    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1240    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1241
[428]1242    // get parent process PID and extended pointer on .elf file
[564]1243    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1244    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1245
[564]1246    // get extended pointer on reference process
1247    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1248
[564]1249// check parent process is the reference process
1250assert( (parent_process_xp == ref_xp ) ,
[624]1251"parent process must be the reference process" );
[407]1252
[438]1253#if DEBUG_PROCESS_MAKE_FORK
[583]1254uint32_t cycle   = (uint32_t)hal_get_cycles();
1255thread_t * this  = CURRENT_THREAD;
1256trdid_t    trdid = this->trdid;
1257pid_t      pid   = this->process->pid;
[438]1258if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1259printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1260__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1261#endif
[172]1262
[408]1263    // allocate a process descriptor
1264    process = process_alloc();
1265    if( process == NULL )
1266    {
1267        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1268        __FUNCTION__, local_cxy ); 
1269        return -1;
1270    }
[1]1271
[408]1272    // allocate a child PID from local cluster
[416]1273    error = cluster_pid_alloc( process , &new_pid );
[428]1274    if( error ) 
[1]1275    {
[408]1276        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1277        __FUNCTION__, local_cxy ); 
1278        process_free( process );
1279        return -1;
[1]1280    }
[408]1281
[469]1282#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1283cycle = (uint32_t)hal_get_cycles();
1284if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1285printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
[583]1286__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1287#endif
1288
[408]1289    // initializes child process descriptor from parent process descriptor
1290    process_reference_init( process,
1291                            new_pid,
1292                            parent_process_xp );
1293
[438]1294#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1295cycle = (uint32_t)hal_get_cycles();
[438]1296if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1297printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1298__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1299#endif
[408]1300
[457]1301
[408]1302    // copy VMM from parent descriptor to child descriptor
1303    error = vmm_fork_copy( process,
1304                           parent_process_xp );
1305    if( error )
[101]1306    {
[408]1307        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1308        __FUNCTION__, local_cxy ); 
1309        process_free( process );
1310        cluster_pid_release( new_pid );
1311        return -1;
[101]1312    }
[172]1313
[438]1314#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1315cycle = (uint32_t)hal_get_cycles();
[438]1316if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1317printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1318__FUNCTION__, pid, trdid, cycle );
[433]1319#endif
[407]1320
[564]1321    // if parent_process is INIT, or if parent_process is the TXT owner,
1322    // the child_process becomes the owner of its TXT terminal
1323    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1324    {
1325        process_txt_set_ownership( XPTR( local_cxy , process ) );
1326
1327#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1328cycle = (uint32_t)hal_get_cycles();
1329if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1330printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1331__FUNCTION__ , pid, trdid, cycle );
[457]1332#endif
1333
1334    }
1335
[428]1336    // update extended pointer on .elf file
1337    process->vfs_bin_xp = vfs_bin_xp;
1338
[408]1339    // create child thread descriptor from parent thread descriptor
1340    error = thread_user_fork( parent_thread_xp,
1341                              process,
1342                              &thread );
1343    if( error )
1344    {
1345        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1346        __FUNCTION__, local_cxy ); 
1347        process_free( process );
1348        cluster_pid_release( new_pid );
1349        return -1;
1350    }
[172]1351
[564]1352// check main thread LTID
1353assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1354"main thread must have LTID == 0" );
[428]1355
[564]1356#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1357cycle = (uint32_t)hal_get_cycles();
[438]1358if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1359printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1360__FUNCTION__, pid, trdid, thread, cycle );
[433]1361#endif
[1]1362
[433]1363    // set Copy_On_Write flag in parent process GPT
[408]1364    // this includes all replicated GPT copies
1365    if( parent_process_cxy == local_cxy )   // reference is local
1366    {
1367        vmm_set_cow( parent_process_ptr );
1368    }
1369    else                                    // reference is remote
1370    {
1371        rpc_vmm_set_cow_client( parent_process_cxy,
1372                                parent_process_ptr );
1373    }
[1]1374
[433]1375    // set Copy_On_Write flag in child process GPT
1376    vmm_set_cow( process );
1377 
[438]1378#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1379cycle = (uint32_t)hal_get_cycles();
[438]1380if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1381printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1382__FUNCTION__, pid, trdid, cycle );
[433]1383#endif
[101]1384
[428]1385    // get extended pointers on parent children_root, children_lock and children_nr
1386    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1387    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1388    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1389
[428]1390    // register process in parent children list
[564]1391    remote_queuelock_acquire( children_lock_xp );
[428]1392        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1393        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1394    remote_queuelock_release( children_lock_xp );
[204]1395
[408]1396    // return success
1397    *child_thread = thread;
1398    *child_pid    = new_pid;
[1]1399
[438]1400#if DEBUG_PROCESS_MAKE_FORK
[433]1401cycle = (uint32_t)hal_get_cycles();
[438]1402if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1403printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1404__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1405#endif
[428]1406
[408]1407    return 0;
1408
[416]1409}   // end process_make_fork()
[408]1410
/////////////////////////////////////////////////////
// Implement the exec() system call: replace the calling process image by
// the one defined in the <exec_info> structure. The sequence is:
// open the .elf file, delete all threads but this main thread, destroy
// and re-initialise the VMM, load the new code/data vsegs, and finally
// jump to the new user code via thread_user_exec().
// On success this function does NOT return (see the trailing assert).
// Returns -1 on failure, before the point of no-return.
error_t process_make_exec( exec_info_t  * exec_info )
{
    thread_t       * thread;                  // local pointer on this thread
    process_t      * process;                 // local pointer on this process
    pid_t            pid;                     // this process identifier
    xptr_t           ref_xp;                  // reference process for this process
    error_t          error;                   // value returned by called functions
    char           * path;                    // path to .elf file
    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
    uint32_t         file_id;                 // file index in fd_array
    uint32_t         args_nr;                 // number of main thread arguments
    char          ** args_pointers;           // array of pointers on main thread arguments

    // get thread, process, pid and ref_xp
    thread  = CURRENT_THREAD;
    process = thread->process;
    pid     = process->pid;
    ref_xp  = process->ref_xp;

    // get relevant infos from exec_info
    path          = exec_info->path;
    args_nr       = exec_info->args_nr;
    args_pointers = exec_info->args_pointers;

#if DEBUG_PROCESS_MAKE_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // open the file identified by <path>
    file_xp = XPTR_NULL;
    file_id = 0xFFFFFFFF;
    error   = vfs_open( process->vfs_root_xp,
                        path,
                        ref_xp,
                        O_RDONLY,
                        0,
                        &file_xp,
                        &file_id );
    if( error )
    {
        printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
        return -1;
    }

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // delete all threads other than this main thread in all clusters
    process_sigaction( pid , DELETE_ALL_THREADS );

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // reset local process VMM
    // NOTE: point of no-return — the old image cannot be restored after this
    vmm_destroy( process );

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // re-initialize the VMM (kentry/args/envs vsegs registration)
    error = vmm_init( process );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM [AG]
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // register code & data vsegs as well as entry-point in process VMM,
    // and register extended pointer on .elf file in process descriptor
    error = elf_load_process( file_xp , process );
    if( error )
    {
        printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM [AG]
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // update the existing main thread descriptor... and jump to user code
    // NOTE(review): on success the .elf file is deliberately left open
    // (vfs_bin_xp keeps referencing it) — confirm it is closed elsewhere.
    error = thread_user_exec( (void *)process->vmm.entry_point,
                              args_nr,
                              args_pointers );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM
        return -1;
    }

    // thread_user_exec() jumps to user code and never returns on success
    assert( false, "we should not execute this code");

    return 0;

}  // end process_make_exec()
1538
[457]1539
[623]1540////////////////////////////////////////////////
1541void process_zero_create( process_t   * process,
1542                          boot_info_t * info )
[428]1543{
[580]1544    error_t error;
1545    pid_t   pid;
[428]1546
[438]1547#if DEBUG_PROCESS_ZERO_CREATE
[433]1548uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1549if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1550printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1551__FUNCTION__, local_cxy, cycle );
[433]1552#endif
[428]1553
[624]1554    // get pointer on VMM
1555    vmm_t * vmm = &process->vmm;
1556
[580]1557    // get PID from local cluster manager for this kernel process
1558    error = cluster_pid_alloc( process , &pid );
1559
1560    if( error || (LPID_FROM_PID( pid ) != 0) )
1561    {
1562        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1563        __FUNCTION__ , local_cxy, pid );
1564        hal_core_sleep();
1565    }
1566
[428]1567    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1568    // the kernel process_zero is its own parent_process,
1569    // reference_process, and owner_process, and cannot be killed...
1570    process->pid        = pid;
[433]1571    process->ref_xp     = XPTR( local_cxy , process );
[443]1572    process->owner_xp   = XPTR( local_cxy , process );
[580]1573    process->parent_xp  = XPTR( local_cxy , process );
[433]1574    process->term_state = 0;
[428]1575
[624]1576    // initilise VSL as empty
1577    vmm->vsegs_nr = 0;
1578        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
1579        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
[623]1580
[624]1581    // initialise GPT as empty
1582    error = hal_gpt_create( &vmm->gpt );
1583
1584    if( error ) 
1585    {
1586        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1587        hal_core_sleep();
1588    }
1589
1590    // initialize GPT lock
1591    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
1592   
1593    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
1594    error = hal_vmm_kernel_init( info );
1595
1596    if( error ) 
1597    {
1598        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
1599        hal_core_sleep();
1600    }
1601
[564]1602    // reset th_tbl[] array and associated fields
[428]1603    uint32_t i;
[564]1604    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1605        {
1606        process->th_tbl[i] = NULL;
1607    }
1608    process->th_nr  = 0;
[564]1609    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1610
[564]1611
[428]1612    // reset children list as empty
1613    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1614    process->children_nr = 0;
[564]1615    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1616                           LOCK_PROCESS_CHILDREN );
[428]1617
[580]1618    // register kernel process in cluster manager local_list
1619    cluster_process_local_link( process );
1620   
[428]1621        hal_fence();
1622
[438]1623#if DEBUG_PROCESS_ZERO_CREATE
[433]1624cycle = (uint32_t)hal_get_cycles();
[438]1625if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1626printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1627__FUNCTION__, local_cxy, cycle );
[433]1628#endif
[428]1629
[610]1630}  // end process_zero_create()
[428]1631
[564]1632////////////////////////////////
[485]1633void process_init_create( void )
[1]1634{
[428]1635    process_t      * process;       // local pointer on process descriptor
[409]1636    pid_t            pid;           // process_init identifier
1637    thread_t       * thread;        // local pointer on main thread
1638    pthread_attr_t   attr;          // main thread attributes
1639    lid_t            lid;           // selected core local index for main thread
[457]1640    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1641    uint32_t         file_id;       // file index in fd_array
[409]1642    error_t          error;
[1]1643
[438]1644#if DEBUG_PROCESS_INIT_CREATE
[610]1645thread_t * this = CURRENT_THREAD;
[433]1646uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1647if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1648printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1649__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1650#endif
[1]1651
[408]1652    // allocates memory for process descriptor from local cluster
1653        process = process_alloc(); 
[457]1654       
[564]1655// check memory allocator
1656assert( (process != NULL),
[624]1657"no memory for process descriptor in cluster %x", local_cxy  );
[101]1658
[610]1659    // set the CWD and VFS_ROOT fields in process descriptor
1660    process->cwd_xp      = process_zero.vfs_root_xp;
1661    process->vfs_root_xp = process_zero.vfs_root_xp;
1662
[409]1663    // get PID from local cluster
[416]1664    error = cluster_pid_alloc( process , &pid );
[408]1665
[564]1666// check PID allocator
1667assert( (error == 0),
[624]1668"cannot allocate PID in cluster %x", local_cxy );
[409]1669
[564]1670// check PID value
1671assert( (pid == 1) ,
[624]1672"process INIT must be first process in cluster 0" );
[457]1673
[409]1674    // initialize process descriptor / parent is local process_zero
1675    process_reference_init( process,
[408]1676                            pid,
[457]1677                            XPTR( local_cxy , &process_zero ) ); 
[408]1678
[564]1679#if(DEBUG_PROCESS_INIT_CREATE & 1)
1680if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1681printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1682__FUNCTION__, this->process->pid, this->trdid );
[564]1683#endif
1684
[457]1685    // open the file identified by CONFIG_PROCESS_INIT_PATH
1686    file_xp = XPTR_NULL;
1687    file_id = -1;
[610]1688        error   = vfs_open( process->vfs_root_xp,
[457]1689                            CONFIG_PROCESS_INIT_PATH,
[610]1690                        XPTR( local_cxy , process ),
[457]1691                            O_RDONLY,
1692                            0,
1693                            &file_xp,
1694                            &file_id );
1695
[564]1696assert( (error == 0),
[624]1697"failed to open file <%s>", CONFIG_PROCESS_INIT_PATH );
[457]1698
[564]1699#if(DEBUG_PROCESS_INIT_CREATE & 1)
1700if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1701printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
1702__FUNCTION__, this->process->pid, this->trdid );
[564]1703#endif
1704
1705   // register "code" and "data" vsegs as well as entry-point
[409]1706    // in process VMM, using information contained in the elf file.
[457]1707        error = elf_load_process( file_xp , process );
[101]1708
[564]1709assert( (error == 0),
[624]1710"cannot access .elf file <%s>", CONFIG_PROCESS_INIT_PATH );
[457]1711
[564]1712#if(DEBUG_PROCESS_INIT_CREATE & 1)
1713if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1714printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1715__FUNCTION__, this->process->pid, this->trdid );
[564]1716#endif
1717
[428]1718    // get extended pointers on process_zero children_root, children_lock
1719    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1720    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1721
[564]1722    // take lock protecting kernel process children list
1723    remote_queuelock_acquire( children_lock_xp );
1724
[428]1725    // register process INIT in parent local process_zero
1726        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1727        hal_atomic_add( &process_zero.children_nr , 1 );
1728
[564]1729    // release lock protecting kernel process children list
1730    remote_queuelock_release( children_lock_xp );
1731
1732#if(DEBUG_PROCESS_INIT_CREATE & 1)
1733if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1734printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1735__FUNCTION__, this->process->pid, this->trdid );
[564]1736#endif
1737
[409]1738    // select a core in local cluster to execute the main thread
1739    lid  = cluster_select_local_core();
1740
1741    // initialize pthread attributes for main thread
1742    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1743    attr.cxy        = local_cxy;
1744    attr.lid        = lid;
1745
1746    // create and initialize thread descriptor
1747        error = thread_user_create( pid,
1748                                (void *)process->vmm.entry_point,
1749                                NULL,
1750                                &attr,
1751                                &thread );
[1]1752
[564]1753assert( (error == 0),
[624]1754"cannot create main thread for <%s>", CONFIG_PROCESS_INIT_PATH );
[428]1755
[564]1756assert( (thread->trdid == 0),
[624]1757"main thread must have index 0 for <%s>", CONFIG_PROCESS_INIT_PATH );
[457]1758
[564]1759#if(DEBUG_PROCESS_INIT_CREATE & 1)
1760if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1761printk("\n[%s] thread[%x,%x] created main thread\n",
1762__FUNCTION__, this->process->pid, this->trdid );
[564]1763#endif
1764
[409]1765    // activate thread
1766        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1767
[124]1768    hal_fence();
[1]1769
[438]1770#if DEBUG_PROCESS_INIT_CREATE
[433]1771cycle = (uint32_t)hal_get_cycles();
[438]1772if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1773printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1774__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1775#endif
[409]1776
[204]1777}  // end process_init_create()
1778
[428]1779/////////////////////////////////////////
1780void process_display( xptr_t process_xp )
1781{
1782    process_t   * process_ptr;
1783    cxy_t         process_cxy;
[443]1784
[428]1785    xptr_t        parent_xp;       // extended pointer on parent process
1786    process_t   * parent_ptr;
1787    cxy_t         parent_cxy;
1788
[443]1789    xptr_t        owner_xp;        // extended pointer on owner process
1790    process_t   * owner_ptr;
1791    cxy_t         owner_cxy;
1792
[428]1793    pid_t         pid;
1794    pid_t         ppid;
[580]1795    lpid_t        lpid;
[428]1796    uint32_t      state;
1797    uint32_t      th_nr;
1798
[443]1799    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1800    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1801    chdev_t     * txt_chdev_ptr;
1802    cxy_t         txt_chdev_cxy;
1803    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1804
1805    xptr_t        elf_file_xp;     // extended pointer on .elf file
1806    cxy_t         elf_file_cxy;
1807    vfs_file_t  * elf_file_ptr;
1808    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1809
1810    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1811    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1812
1813    // get cluster and local pointer on process
1814    process_ptr = GET_PTR( process_xp );
1815    process_cxy = GET_CXY( process_xp );
1816
[580]1817    // get process PID, LPID, and state
[564]1818    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1819    lpid  = LPID_FROM_PID( pid );
[564]1820    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1821
[580]1822    // get process PPID
[564]1823    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1824    parent_cxy = GET_CXY( parent_xp );
1825    parent_ptr = GET_PTR( parent_xp );
[564]1826    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1827
1828    // get number of threads
[564]1829    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1830
[443]1831    // get pointers on owner process descriptor
[564]1832    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1833    owner_cxy = GET_CXY( owner_xp );
1834    owner_ptr = GET_PTR( owner_xp );
[428]1835
[580]1836    // get process TXT name and .elf name
1837    if( lpid )                                   // user process
1838    {
[443]1839
[580]1840        // get extended pointer on file descriptor associated to TXT_RX
1841        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1842
[580]1843        assert( (txt_file_xp != XPTR_NULL) ,
[624]1844        "process must be attached to one TXT terminal" ); 
[443]1845
[580]1846        // get TXT_RX chdev pointers
1847        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1848        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1849        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1850
1851        // get TXT_RX name and ownership
1852        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1853                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1854   
[580]1855        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1856                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]1857
[580]1858        // get process .elf name
1859        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1860        elf_file_cxy  = GET_CXY( elf_file_xp );
1861        elf_file_ptr  = GET_PTR( elf_file_xp );
1862        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1863        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1864    }
1865    else                                         // kernel process_zero
1866    {
1867        // TXT name and .elf name are not registered in kernel process_zero
1868        strcpy( txt_name , "txt0_rx" );
1869        txt_owner_xp = process_xp; 
1870        strcpy( elf_name , "kernel.elf" );
1871    }
1872
[428]1873    // display process info
[443]1874    if( txt_owner_xp == process_xp )
[428]1875    {
[581]1876        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
1877        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1878    }
1879    else
1880    {
[581]1881        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
1882        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1883    }
1884}  // end process_display()
1885
1886
1887////////////////////////////////////////////////////////////////////////////////////////
1888//     Terminals related functions
1889////////////////////////////////////////////////////////////////////////////////////////
1890
[581]1891//////////////////////////////////
[485]1892uint32_t process_txt_alloc( void )
[428]1893{
1894    uint32_t  index;       // TXT terminal index
1895    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1896    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1897    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1898    xptr_t    root_xp;     // extended pointer on owner field in chdev
1899
1900    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1901    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1902    {
1903        // get pointers on TXT_RX[index]
1904        chdev_xp  = chdev_dir.txt_rx[index];
1905        chdev_cxy = GET_CXY( chdev_xp );
1906        chdev_ptr = GET_PTR( chdev_xp );
1907
1908        // get extended pointer on root of attached process
1909        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1910
1911        // return free TXT index if found
1912        if( xlist_is_empty( root_xp ) ) return index; 
1913    }
1914
[492]1915    assert( false , "no free TXT terminal found" );
[428]1916
1917    return -1;
1918
1919} // end process_txt_alloc()
1920
1921/////////////////////////////////////////////
1922void process_txt_attach( process_t * process,
1923                         uint32_t    txt_id )
1924{
1925    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1926    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1927    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1928    xptr_t      root_xp;      // extended pointer on list root in chdev
1929    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1930
[564]1931// check process is in owner cluster
1932assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
1933"process descriptor not in owner cluster" );
[428]1934
[564]1935// check terminal index
1936assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1937"illegal TXT terminal index" );
[428]1938
1939    // get pointers on TXT_RX[txt_id] chdev
1940    chdev_xp  = chdev_dir.txt_rx[txt_id];
1941    chdev_cxy = GET_CXY( chdev_xp );
1942    chdev_ptr = GET_PTR( chdev_xp );
1943
1944    // get extended pointer on root & lock of attached process list
1945    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1946    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1947
[564]1948    // get lock protecting list of processes attached to TXT
1949    remote_busylock_acquire( lock_xp );
1950
[428]1951    // insert process in attached process list
1952    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1953
[564]1954    // release lock protecting list of processes attached to TXT
1955    remote_busylock_release( lock_xp );
1956
[446]1957#if DEBUG_PROCESS_TXT
[610]1958thread_t * this = CURRENT_THREAD;
[457]1959uint32_t cycle = (uint32_t)hal_get_cycles();
[446]1960if( DEBUG_PROCESS_TXT < cycle )
[610]1961printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
1962__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]1963#endif
[428]1964
1965} // end process_txt_attach()
1966
[436]1967/////////////////////////////////////////////
1968void process_txt_detach( xptr_t  process_xp )
[428]1969{
[436]1970    process_t * process_ptr;  // local pointer on process in owner cluster
1971    cxy_t       process_cxy;  // process owner cluster
1972    pid_t       process_pid;  // process identifier
1973    xptr_t      file_xp;      // extended pointer on stdin file
[428]1974    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1975    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1976    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1977    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1978
[436]1979    // get process cluster, local pointer, and PID
1980    process_cxy = GET_CXY( process_xp );
1981    process_ptr = GET_PTR( process_xp );
[564]1982    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1983
[564]1984// check process descriptor in owner cluster
1985assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
1986"process descriptor not in owner cluster" );
[436]1987
1988    // release TXT ownership (does nothing if not TXT owner)
1989    process_txt_transfer_ownership( process_xp );
[428]1990
[436]1991    // get extended pointer on process stdin file
[564]1992    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]1993
1994    // get pointers on TXT_RX chdev
1995    chdev_xp  = chdev_from_file( file_xp );
[428]1996    chdev_cxy = GET_CXY( chdev_xp );
1997    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1998
[436]1999    // get extended pointer on lock protecting attached process list
[428]2000    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2001
[564]2002    // get lock protecting list of processes attached to TXT
2003    remote_busylock_acquire( lock_xp );
2004
[428]2005    // unlink process from attached process list
[436]2006    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2007
[564]2008    // release lock protecting list of processes attached to TXT
2009    remote_busylock_release( lock_xp );
2010
[446]2011#if DEBUG_PROCESS_TXT
[610]2012thread_t * this = CURRENT_THREAD;
[457]2013uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2014uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]2015if( DEBUG_PROCESS_TXT < cycle )
[610]2016printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
2017__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]2018#endif
[428]2019
2020} // end process_txt_detach()
2021
2022///////////////////////////////////////////////////
2023void process_txt_set_ownership( xptr_t process_xp )
2024{
2025    process_t * process_ptr;
2026    cxy_t       process_cxy;
[436]2027    pid_t       process_pid;
[428]2028    xptr_t      file_xp;
2029    xptr_t      txt_xp;     
2030    chdev_t   * txt_ptr;
2031    cxy_t       txt_cxy;
2032
[436]2033    // get pointers on process in owner cluster
[428]2034    process_cxy = GET_CXY( process_xp );
[435]2035    process_ptr = GET_PTR( process_xp );
[564]2036    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2037
2038    // check owner cluster
[492]2039    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2040    "process descriptor not in owner cluster" );
[436]2041
[428]2042    // get extended pointer on stdin pseudo file
[564]2043    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2044
2045    // get pointers on TXT chdev
2046    txt_xp  = chdev_from_file( file_xp );
2047    txt_cxy = GET_CXY( txt_xp );
[435]2048    txt_ptr = GET_PTR( txt_xp );
[428]2049
2050    // set owner field in TXT chdev
[564]2051    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2052
[446]2053#if DEBUG_PROCESS_TXT
[610]2054thread_t * this = CURRENT_THREAD;
[457]2055uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2056uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2057if( DEBUG_PROCESS_TXT < cycle )
[610]2058printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
2059__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2060#endif
2061
[428]2062}  // end process_txt_set ownership()
2063
[436]2064////////////////////////////////////////////////////////
2065void process_txt_transfer_ownership( xptr_t process_xp )
[428]2066{
[436]2067    process_t * process_ptr;     // local pointer on process releasing ownership
2068    cxy_t       process_cxy;     // process cluster
2069    pid_t       process_pid;     // process identifier
[428]2070    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2071    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2072    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2073    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2074    uint32_t    txt_id;          // TXT_RX channel
[428]2075    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2076    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2077    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2078    xptr_t      iter_xp;         // iterator for xlist
2079    xptr_t      current_xp;      // extended pointer on current process
[433]2080    process_t * current_ptr;     // local pointer on current process
2081    cxy_t       current_cxy;     // cluster for current process
[428]2082
[457]2083#if DEBUG_PROCESS_TXT
[610]2084thread_t * this  = CURRENT_THREAD;
2085uint32_t   cycle;
[457]2086#endif
2087
[436]2088    // get pointers on process in owner cluster
[428]2089    process_cxy = GET_CXY( process_xp );
[435]2090    process_ptr = GET_PTR( process_xp );
[564]2091    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2092
2093    // check owner cluster
[492]2094    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2095    "process descriptor not in owner cluster" );
[436]2096
[428]2097    // get extended pointer on stdin pseudo file
[564]2098    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2099
2100    // get pointers on TXT chdev
2101    txt_xp  = chdev_from_file( file_xp );
2102    txt_cxy = GET_CXY( txt_xp );
[433]2103    txt_ptr = GET_PTR( txt_xp );
[428]2104
[433]2105    // get extended pointer on TXT_RX owner and TXT channel
[564]2106    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2107    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2108
[436]2109    // transfer ownership only if process is the TXT owner
2110    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2111    {
[436]2112        // get extended pointers on root and lock of attached processes list
2113        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2114        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2115
[436]2116        // get lock
[564]2117        remote_busylock_acquire( lock_xp );
[436]2118
2119        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]2120        {
[436]2121            // scan attached process list to find KSH process
2122            XLIST_FOREACH( root_xp , iter_xp )
2123            {
2124                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2125                current_cxy = GET_CXY( current_xp );
2126                current_ptr = GET_PTR( current_xp );
[435]2127
[436]2128                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2129                {
2130                    // release lock
[564]2131                    remote_busylock_release( lock_xp );
[436]2132
2133                    // set owner field in TXT chdev
[564]2134                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2135
[446]2136#if DEBUG_PROCESS_TXT
[610]2137cycle = (uint32_t)hal_get_cycles();
[564]2138uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2139if( DEBUG_PROCESS_TXT < cycle )
[610]2140printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
2141__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
[457]2142process_txt_display( txt_id );
[436]2143#endif
2144                     return;
2145                }
2146            }
2147 
2148            // release lock
[564]2149            remote_busylock_release( lock_xp );
[436]2150
2151            // PANIC if KSH not found
[492]2152            assert( false , "KSH process not found for TXT %d" );
[436]2153
2154            return;
2155        }
2156        else                                               // process is KSH
2157        {
2158            // scan attached process list to find another process
2159            XLIST_FOREACH( root_xp , iter_xp )
[428]2160            {
[436]2161                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2162                current_cxy = GET_CXY( current_xp );
2163                current_ptr = GET_PTR( current_xp );
2164
2165                if( current_xp != process_xp )            // current is not KSH
2166                {
2167                    // release lock
[564]2168                    remote_busylock_release( lock_xp );
[436]2169
2170                    // set owner field in TXT chdev
[564]2171                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2172
[446]2173#if DEBUG_PROCESS_TXT
[610]2174cycle  = (uint32_t)hal_get_cycles();
[564]2175uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2176if( DEBUG_PROCESS_TXT < cycle )
[610]2177printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
2178__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[457]2179process_txt_display( txt_id );
[436]2180#endif
2181                     return;
2182                }
[428]2183            }
[436]2184
2185            // release lock
[564]2186            remote_busylock_release( lock_xp );
[436]2187
2188            // no more owner for TXT if no other process found
[564]2189            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2190
[446]2191#if DEBUG_PROCESS_TXT
[436]2192cycle = (uint32_t)hal_get_cycles();
[446]2193if( DEBUG_PROCESS_TXT < cycle )
[610]2194printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
2195__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[457]2196process_txt_display( txt_id );
[436]2197#endif
2198            return;
[428]2199        }
[436]2200    }
2201    else
2202    {
[433]2203
[446]2204#if DEBUG_PROCESS_TXT
[436]2205cycle = (uint32_t)hal_get_cycles();
[446]2206if( DEBUG_PROCESS_TXT < cycle )
[593]2207printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n",
[610]2208__FUNCTION__, this->trdid, process_pid, cycle );
[457]2209process_txt_display( txt_id );
[436]2210#endif
2211
[428]2212    }
[436]2213}  // end process_txt_transfer_ownership()
[428]2214
2215
[564]2216////////////////////////////////////////////////
2217bool_t process_txt_is_owner( xptr_t process_xp )
[457]2218{
2219    // get local pointer and cluster of process in owner cluster
2220    cxy_t       process_cxy = GET_CXY( process_xp );
2221    process_t * process_ptr = GET_PTR( process_xp );
2222
[564]2223// check calling thread execute in target process owner cluster
2224pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2225assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2226"process descriptor not in owner cluster" );
[457]2227
2228    // get extended pointer on stdin pseudo file
[564]2229    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2230
2231    // get pointers on TXT chdev
2232    xptr_t    txt_xp  = chdev_from_file( file_xp );
2233    cxy_t     txt_cxy = GET_CXY( txt_xp );
2234    chdev_t * txt_ptr = GET_PTR( txt_xp );
2235
2236    // get extended pointer on TXT_RX owner process
[564]2237    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2238
2239    return (process_xp == owner_xp);
2240
2241}   // end process_txt_is_owner()
2242
[436]2243////////////////////////////////////////////////     
2244xptr_t process_txt_get_owner( uint32_t channel )
[435]2245{
2246    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2247    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2248    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2249
[564]2250    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2251
[457]2252}  // end process_txt_get_owner()
2253
[435]2254///////////////////////////////////////////
2255void process_txt_display( uint32_t txt_id )
2256{
2257    xptr_t      chdev_xp;
2258    cxy_t       chdev_cxy;
2259    chdev_t   * chdev_ptr;
2260    xptr_t      root_xp;
2261    xptr_t      lock_xp;
2262    xptr_t      current_xp;
2263    xptr_t      iter_xp;
[443]2264    cxy_t       txt0_cxy;
2265    chdev_t   * txt0_ptr;
2266    xptr_t      txt0_xp;
2267    xptr_t      txt0_lock_xp;
2268   
[435]2269    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2270    "illegal TXT terminal index" );
[435]2271
[443]2272    // get pointers on TXT0 chdev
2273    txt0_xp  = chdev_dir.txt_tx[0];
2274    txt0_cxy = GET_CXY( txt0_xp );
2275    txt0_ptr = GET_PTR( txt0_xp );
2276
2277    // get extended pointer on TXT0 lock
2278    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2279
[435]2280    // get pointers on TXT_RX[txt_id] chdev
2281    chdev_xp  = chdev_dir.txt_rx[txt_id];
2282    chdev_cxy = GET_CXY( chdev_xp );
2283    chdev_ptr = GET_PTR( chdev_xp );
2284
2285    // get extended pointer on root & lock of attached process list
2286    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2287    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2288
[443]2289    // get lock on attached process list
[564]2290    remote_busylock_acquire( lock_xp );
[443]2291
2292    // get TXT0 lock in busy waiting mode
[564]2293    remote_busylock_acquire( txt0_lock_xp );
[443]2294
[435]2295    // display header
[443]2296    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2297    txt_id , (uint32_t)hal_get_cycles() );
[435]2298
[436]2299    // scan attached process list
[435]2300    XLIST_FOREACH( root_xp , iter_xp )
2301    {
2302        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2303        process_display( current_xp );
2304    }
2305
[443]2306    // release TXT0 lock in busy waiting mode
[564]2307    remote_busylock_release( txt0_lock_xp );
[443]2308
2309    // release lock on attached process list
[564]2310    remote_busylock_release( lock_xp );
[435]2311
2312}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.