source: trunk/kernel/kern/process.c @ 445

Last change on this file since 445 was 445, checked in by alain, 6 years ago

Restructure the mini_libc.

File size: 72.0 KB
RevLine 
[1]1/*
2 * process.c - process related management
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[433]6 *          Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[1]27#include <hal_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
[428]41#include <chdev.h>
[1]42#include <list.h>
[407]43#include <string.h>
[1]44#include <scheduler.h>
45#include <remote_spinlock.h>
46#include <dqdt.h>
47#include <cluster.h>
48#include <ppm.h>
49#include <boot_info.h>
50#include <process.h>
51#include <elf.h>
[23]52#include <syscalls.h>
[435]53#include <shared_syscalls.h>
[1]54
55//////////////////////////////////////////////////////////////////////////////////////////
56// Extern global variables
57//////////////////////////////////////////////////////////////////////////////////////////
58
[428]59extern process_t           process_zero;     // allocated in kernel_init.c
60extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]61
62//////////////////////////////////////////////////////////////////////////////////////////
63// Process initialisation related functions
64//////////////////////////////////////////////////////////////////////////////////////////
65
66///////////////////////////
67process_t * process_alloc()
68{
69        kmem_req_t   req;
70
71    req.type  = KMEM_PROCESS;
72        req.size  = sizeof(process_t);
73        req.flags = AF_KERNEL;
74
75    return (process_t *)kmem_alloc( &req );
76}
77
78////////////////////////////////////////
79void process_free( process_t * process )
80{
81    kmem_req_t  req;
82
83        req.type = KMEM_PROCESS;
84        req.ptr  = process;
85        kmem_free( &req );
86}
87
[101]88/////////////////////////////////////////////////
89void process_reference_init( process_t * process,
90                             pid_t       pid,
[428]91                             xptr_t      parent_xp,
[408]92                             xptr_t      model_xp )
[1]93{
[428]94    cxy_t       parent_cxy;
95    process_t * parent_ptr;
[408]96    cxy_t       model_cxy;
97    process_t * model_ptr;
[407]98    xptr_t      stdin_xp;
99    xptr_t      stdout_xp;
100    xptr_t      stderr_xp;
101    uint32_t    stdin_id;
102    uint32_t    stdout_id;
103    uint32_t    stderr_id;
[415]104    error_t     error;
[428]105    uint32_t    txt_id;
106    char        rx_path[40];
107    char        tx_path[40];
[440]108    xptr_t      file_xp;
[428]109    xptr_t      chdev_xp;
110    chdev_t *   chdev_ptr;
111    cxy_t       chdev_cxy;
112    pid_t       model_pid;
113    pid_t       parent_pid;
[1]114
[408]115    // get model process cluster and local pointer
116    model_cxy = GET_CXY( model_xp );
[435]117    model_ptr = GET_PTR( model_xp );
[1]118
[428]119    // get parent process cluster and local pointer
120    parent_cxy = GET_CXY( parent_xp );
[435]121    parent_ptr = GET_PTR( parent_xp );
[204]122
[428]123    // get model_pid and parent_pid
124    parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
125    model_pid  = hal_remote_lw( XPTR( model_cxy  , &model_ptr->pid ) );
126
[438]127#if DEBUG_PROCESS_REFERENCE_INIT
[433]128uint32_t cycle = (uint32_t)hal_get_cycles();
[438]129if( DEBUG_PROCESS_REFERENCE_INIT )
[433]130printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
131__FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );
132#endif
[428]133
134    // initialize PID, REF_XP, OWNER_XP, PARENT_XP, and STATE
[433]135        process->pid        = pid;
136    process->ref_xp     = XPTR( local_cxy , process );
[443]137    process->owner_xp   = XPTR( local_cxy , process );
[433]138    process->parent_xp  = parent_xp;
139    process->term_state = 0;
[428]140
[409]141    // initialize vmm as empty
[415]142    error = vmm_init( process );
143    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );
144 
[438]145#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]146cycle = (uint32_t)hal_get_cycles();
[438]147if( DEBUG_PROCESS_REFERENCE_INIT )
[433]148printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n", 
149__FUNCTION__ , CURRENT_THREAD , pid , cycle );
150#endif
[1]151
[409]152    // initialize fd_array as empty
[408]153    process_fd_init( process );
[1]154
[428]155    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
156    // - if INIT (pid == 1)         => link to kernel TXT[0]
157    // - if KSH[i] (model_pid == 1) => allocate a free TXT[i]
158    // - if USER process            => same terminal as model
159
160    if( (pid == 1) || (model_pid == 1)) // INIT or KSH process
[408]161    {
[428]162        if (pid == 1 )  txt_id = 0;                    // INIT
163        else            txt_id = process_txt_alloc();  // KSH[i]
164
165        // attach process to TXT[txt_id]
166        process_txt_attach( process , txt_id ); 
167
168        // build path to TXT_RX[i] and TXT_TX[i] chdevs
169        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
170        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
171
172        // create stdin pseudo file         
173        error = vfs_open( process,
174                           rx_path,
[408]175                           O_RDONLY, 
176                           0,                // FIXME chmod
177                           &stdin_xp, 
178                           &stdin_id );
[1]179
[428]180        assert( (error == 0) , __FUNCTION__ , "cannot open stdin pseudo file" );
181        assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" );
182
[440]183#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
184cycle = (uint32_t)hal_get_cycles();
185if( DEBUG_PROCESS_REFERENCE_INIT )
186printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n", 
187__FUNCTION__ , CURRENT_THREAD , pid , cycle );
188#endif
189
[428]190        // create stdout pseudo file         
191        error = vfs_open( process,
192                           tx_path,
[408]193                           O_WRONLY, 
194                           0,                // FIXME chmod
195                           &stdout_xp, 
196                           &stdout_id );
[1]197
[428]198        assert( (error == 0) , __FUNCTION__ , "cannot open stdout pseudo file" );
199        assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" );
200
[440]201#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
202cycle = (uint32_t)hal_get_cycles();
203if( DEBUG_PROCESS_REFERENCE_INIT )
204printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n", 
205__FUNCTION__ , CURRENT_THREAD , pid , cycle );
206#endif
207
[428]208        // create stderr pseudo file         
209        error = vfs_open( process,
210                           tx_path,
[408]211                           O_WRONLY, 
212                           0,                // FIXME chmod
213                           &stderr_xp, 
214                           &stderr_id );
[428]215
216        assert( (error == 0) , __FUNCTION__ , "cannot open stderr pseudo file" );
217        assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" );
218
[440]219#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
220cycle = (uint32_t)hal_get_cycles();
221if( DEBUG_PROCESS_REFERENCE_INIT )
222printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n", 
223__FUNCTION__ , CURRENT_THREAD , pid , cycle );
224#endif
225
[408]226    }
[428]227    else                                            // normal user process
[408]228    {
[440]229        // get extended pointer on stdin pseudo file in model process
230        file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) );
231
[428]232        // get extended pointer on model process TXT chdev
[440]233        chdev_xp = chdev_from_file( file_xp );
[428]234 
235        // get cluster and local pointer on chdev
236        chdev_cxy = GET_CXY( chdev_xp );
[435]237        chdev_ptr = GET_PTR( chdev_xp );
[428]238 
239        // get TXT terminal index
240        txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]241
[428]242        // attach process to TXT[txt_id]
243        process_txt_attach( process , txt_id ); 
[407]244
[428]245        // copy all open files from model process fd_array to this process
246        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
247                                XPTR( model_cxy , &model_ptr->fd_array ) );
[408]248    }
[407]249
[409]250    // initialize vfs root and cwd inodes from the model process
[408]251    process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
252                                                         &model_ptr->vfs_root_xp ) );
253    process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
254                                                         &model_ptr->vfs_cwd_xp ) );
[409]255    vfs_inode_remote_up( process->vfs_root_xp );
256    vfs_inode_remote_up( process->vfs_cwd_xp );
[408]257
[409]258    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
259
[438]260#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]261cycle = (uint32_t)hal_get_cycles();
[438]262if( DEBUG_PROCESS_REFERENCE_INIT )
[433]263printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n", 
264__FUNCTION__ , CURRENT_THREAD , pid , cycle );
265#endif
[407]266
[408]267    // reset children list root
268    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
269    process->children_nr     = 0;
[428]270    remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
[407]271
[408]272    // reset semaphore / mutex / barrier / condvar list roots
273    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
274    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
275    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
276    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
277    remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );
[407]278
[408]279    // register new process in the local cluster manager pref_tbl[]
280    lpid_t lpid = LPID_FROM_PID( pid );
281    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]282
[408]283    // register new process descriptor in local cluster manager local_list
284    cluster_process_local_link( process );
[407]285
[408]286    // register new process descriptor in local cluster manager copies_list
287    cluster_process_copies_link( process );
[172]288
[408]289    // reset th_tbl[] array as empty in process descriptor
[1]290    uint32_t i;
291    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
292        {
293        process->th_tbl[i] = NULL;
294    }
295    process->th_nr  = 0;
296    spinlock_init( &process->th_lock );
297
[124]298        hal_fence();
[1]299
[438]300#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]301cycle = (uint32_t)hal_get_cycles();
[438]302if( DEBUG_PROCESS_REFERENCE_INIT )
[433]303printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 
304__FUNCTION__ , CURRENT_THREAD , pid , cycle );
305#endif
[101]306
[428]307}  // process_reference_init()
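// NOTE : the two callers of process_reference_init() in this file illustrate the
// role of the <model_xp> argument : process_make_fork() passes the parent process
// as both parent and model, while process_make_exec() keeps the original parent
// but passes the old process as model ; the new process therefore inherits its
// fd_array, vfs root / cwd, and TXT terminal from the model process, and only the
// parent / children links from the parent process.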
[204]308
[1]309/////////////////////////////////////////////////////
310error_t process_copy_init( process_t * local_process,
311                           xptr_t      reference_process_xp )
312{
[415]313    error_t error;
314
[23]315    // get reference process cluster and local pointer
316    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]317    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]318
[428]319    // initialize PID, REF_XP, OWNER_XP, PARENT_XP, and STATE
[433]320    local_process->pid        = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
321    local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
322    local_process->ref_xp     = reference_process_xp;
[443]323    local_process->owner_xp   = reference_process_xp;
[433]324    local_process->term_state = 0;
[407]325
[438]326#if DEBUG_PROCESS_COPY_INIT
[433]327uint32_t cycle = (uint32_t)hal_get_cycles();
[438]328if( DEBUG_PROCESS_COPY_INIT )
[433]329printk("\n[DBG] %s : thread %x enter for process %x\n",
330__FUNCTION__ , CURRENT_THREAD , local_process->pid );
331#endif
[407]332
[172]333    // reset local process vmm
[415]334    error = vmm_init( local_process );
335    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n");
[1]336
[172]337    // reset process file descriptors array
[23]338        process_fd_init( local_process );
[1]339
[23]340    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
341    local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
342    local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
343    local_process->vfs_cwd_xp  = XPTR_NULL;
[1]344
345    // reset children list root (not used in a process descriptor copy)
346    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]347    local_process->children_nr   = 0;
[428]348    remote_spinlock_init( XPTR( local_cxy , &local_process->children_lock ) );
[1]349
[428]350    // reset children_list (not used in a process descriptor copy)
351    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]352
353    // reset semaphore / mutex / barrier / condvar list roots (not used in a process descriptor copy)
354    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]355    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
356    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
357    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]358
[23]359    // reset th_tbl[] array as empty
[1]360    uint32_t i;
361    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
362        {
363        local_process->th_tbl[i] = NULL;
364    }
365    local_process->th_nr  = 0;
366    spinlock_init( &local_process->th_lock );
367
368    // register new process descriptor in local cluster manager local_list
369    cluster_process_local_link( local_process );
370
371    // register new process descriptor in owner cluster manager copies_list
372    cluster_process_copies_link( local_process );
373
[124]374        hal_fence();
[1]375
[438]376#if DEBUG_PROCESS_COPY_INIT
[433]377cycle = (uint32_t)hal_get_cycles();
[438]378if( DEBUG_PROCESS_COPY_INIT )
[433]379printk("\n[DBG] %s : thread %x exit for process %x\n",
380__FUNCTION__ , CURRENT_THREAD , local_process->pid );
381#endif
[279]382
[1]383    return 0;
384
[204]385} // end process_copy_init()
386
[1]387///////////////////////////////////////////
388void process_destroy( process_t * process )
389{
[428]390    xptr_t      parent_xp;
391    process_t * parent_ptr;
392    cxy_t       parent_cxy;
393    xptr_t      children_lock_xp;
[1]394
[437]395    pid_t       pid = process->pid;
396
[428]397        assert( (process->th_nr == 0) , __FUNCTION__ ,
[437]398    "process %x in cluster %x has still active threads", pid , local_cxy );
[428]399
[438]400#if DEBUG_PROCESS_DESTROY
[433]401uint32_t cycle = (uint32_t)hal_get_cycles();
[438]402if( DEBUG_PROCESS_DESTROY )
[445]403printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
404__FUNCTION__ , CURRENT_THREAD , pid , local_cxy , cycle );
[433]405#endif
[428]406
[436]407    // remove process from local_list in local cluster manager
408    cluster_process_local_unlink( process );
[1]409
[436]410    // remove process from copies_list in owner cluster manager
411    cluster_process_copies_unlink( process );
[23]412
[443]413    // remove process from its parent children_list if this is the process owner cluster
[437]414    if( CXY_FROM_PID( pid ) == local_cxy )
[428]415    {
416        // get pointers on parent process
417        parent_xp  = process->parent_xp;
418        parent_cxy = GET_CXY( parent_xp );
419        parent_ptr = GET_PTR( parent_xp );
420
421        // get extended pointer on children_lock in parent process
422        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
423
424        // remove process from children_list
425        remote_spinlock_lock( children_lock_xp );
426        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
427        remote_spinlock_unlock( children_lock_xp );
428    }
429
[443]430    // release the process PID to the cluster manager if this is the process owner cluster
[440]431    if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );
[416]432
[409]433    // FIXME close all open files and update dirty [AG]
[23]434
[428]435    // decrease refcount for bin file, root file and cwd file
[337]436        if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
437        if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
438        if( process->vfs_cwd_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_cwd_xp );
[1]439
440    // Destroy VMM
441    vmm_destroy( process );
442
[416]443    // release memory allocated to process descriptor
444    process_free( process );
[1]445
[438]446#if DEBUG_PROCESS_DESTROY
[433]447cycle = (uint32_t)hal_get_cycles();
[438]448if( DEBUG_PROCESS_DESTROY )
[445]449printk("\n[DBG] %s : thread %x exit / destroyed process %x in cluster %x / cycle %d\n",
450__FUNCTION__ , CURRENT_THREAD , pid, local_cxy, cycle );
[433]451#endif
[428]452
[407]453}  // end process_destroy()
454
[409]455/////////////////////////////////////////////////
456char * process_action_str( uint32_t action_type )
457{
458    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
459    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
460    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
461    else                                          return "undefined";
462}
463
[435]464////////////////////////////////////////
465void process_sigaction( pid_t       pid,
[409]466                        uint32_t    action_type )
467{
468    cxy_t              owner_cxy;         // owner cluster identifier
469    lpid_t             lpid;              // process index in owner cluster
470    cluster_t        * cluster;           // pointer on cluster manager
471    xptr_t             root_xp;           // extended pointer on root of copies
472    xptr_t             lock_xp;           // extended pointer on lock protecting copies
473    xptr_t             iter_xp;           // iterator on copies list
474    xptr_t             process_xp;        // extended pointer on process copy
475    cxy_t              process_cxy;       // process copy cluster identifier
[436]476    reg_t              save_sr;           // for critical section
477    rpc_desc_t         rpc;               // shared RPC descriptor
[409]478
[435]479    thread_t * client = CURRENT_THREAD;
480
[438]481#if DEBUG_PROCESS_SIGACTION
[433]482uint32_t cycle = (uint32_t)hal_get_cycles();
[438]483if( DEBUG_PROCESS_SIGACTION < cycle )
[435]484printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n",
485__FUNCTION__ , client, process_action_str( action_type ) , pid , cycle );
[433]486#endif
[409]487
[436]488    // get pointer on local cluster manager
[416]489    cluster = LOCAL_CLUSTER;
490
[409]491    // get owner cluster identifier and process lpid
[435]492    owner_cxy = CXY_FROM_PID( pid );
493    lpid      = LPID_FROM_PID( pid );
[409]494
[435]495    // get root of list of copies, lock, and number of copies from owner cluster
[436]496    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
497    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
[435]498
[416]499    // check action type
500    assert( ((action_type == DELETE_ALL_THREADS ) ||
501             (action_type == BLOCK_ALL_THREADS )  ||
[428]502             (action_type == UNBLOCK_ALL_THREADS )), __FUNCTION__ , "illegal action type" );
[416]503             
[436]504    // allocate a shared RPC descriptor on the client thread stack :
505    // it can be shared because all parallel, non-blocking server threads
506    // use the same input arguments and the same shared RPC response field
[416]507
[436]508    // the client thread makes the following sequence:
509    // 1. mask interrupts
510    // 2. block itself
511    // 3. send RPC requests to all copies
512    // 4. unmask interrupts
513    // 5. deschedule
514
515    // mask IRQs
516    hal_disable_irq( &save_sr);
517
518    // the client thread registers a blocking condition for itself
519    thread_block( XPTR( local_cxy , client ) , THREAD_BLOCKED_RPC );
520
[409]521    // take the lock protecting the copies
522    remote_spinlock_lock( lock_xp );
523
[436]524    // initialize shared RPC descriptor
[438]525    rpc.responses = 0;
526    rpc.blocking  = false;
527    rpc.index     = RPC_PROCESS_SIGACTION;
528    rpc.thread    = client;
529    rpc.lid       = client->core->lid;
530    rpc.args[0]   = action_type;
531    rpc.args[1]   = pid;
[436]532
533    // send RPCs to all clusters containing a process copy
[409]534    XLIST_FOREACH( root_xp , iter_xp )
535    {
[440]536        // atomically increment responses counter
537        hal_atomic_add( (void *)&rpc.responses , 1 );
[409]538
[440]539        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
540        process_cxy = GET_CXY( process_xp );
541
[438]542#if DEBUG_PROCESS_SIGACTION
543if( DEBUG_PROCESS_SIGACTION < cycle )
[436]544printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
545__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
[433]546#endif
[436]547        // call RPC in target cluster
[435]548        rpc_process_sigaction_client( process_cxy , &rpc );
[409]549    }
550   
551    // release the lock protecting process copies
552    remote_spinlock_unlock( lock_xp );
553
[436]554    // restore IRQs
555    hal_restore_irq( save_sr);
[409]556
[440]557    // client thread deschedules : it will be unblocked by the last RPC server thread
[436]558    sched_yield("blocked on rpc_process_sigaction");
[409]559
[438]560#if DEBUG_PROCESS_SIGACTION
[433]561cycle = (uint32_t)hal_get_cycles();
[438]562if( DEBUG_PROCESS_SIGACTION < cycle )
[433]563printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
[436]564__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
[433]565#endif
[416]566
[409]567}  // end process_sigaction()
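// NOTE : the RPCs sent above are non-blocking ; in each target cluster they are
// presumably served by an RPC thread that calls one of the three functions below
// (process_block_threads / process_delete_threads / process_unblock_threads)
// according to <action_type>, decrements the shared rpc.responses counter, and
// unblocks the client thread registered in rpc.thread when this counter reaches
// zero, which is what the sched_yield() above is waiting for.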
568
[433]569/////////////////////////////////////////////////
[440]570void process_block_threads( process_t * process,
571                            xptr_t      client_xp )
[1]572{
[409]573    thread_t          * target;         // pointer on target thread
[433]574    thread_t          * this;           // pointer on calling thread
[409]575    uint32_t            ltid;           // index in process th_tbl
[436]576    cxy_t               owner_cxy;      // target process owner cluster
[409]577    uint32_t            count;          // requests counter
[436]578    volatile uint32_t   ack_count;      // scheduler acknowledge counter
[1]579
[416]580    // get calling thread pointer
[433]581    this = CURRENT_THREAD;
[407]582
[436]583    // get target process owner cluster
584    owner_cxy = CXY_FROM_PID( process->pid );
585
[438]586#if DEBUG_PROCESS_SIGACTION
[433]587uint32_t cycle = (uint32_t)hal_get_cycles();
[438]588if( DEBUG_PROCESS_SIGACTION < cycle )
[433]589printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
590__FUNCTION__ , this , process->pid , local_cxy , cycle );
591#endif
[409]592
593    // get lock protecting process th_tbl[]
[1]594    spinlock_lock( &process->th_lock );
595
[440]596    // loop on target process local threads
[409]597    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[436]598    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]599    {
[409]600        target = process->th_tbl[ltid];
[1]601
[436]602        if( target != NULL )                                 // thread exists
[1]603        {
604            count++;
[409]605
[440]606            // main thread and client thread should not be blocked
607            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
608                (client_xp) != XPTR( local_cxy , target ) )          // not client thread
[416]609            {
610                // set the global blocked bit in target thread descriptor.
[436]611                thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
612 
613                // - if the calling thread and the target thread are on the same core,
614                //   we don't need a confirmation from the scheduler,
615                // - if the calling thread and the target thread are not running on the same
616                //   core, we ask the target scheduler to acknowledge the blocking,
617                //   to be sure that the target thread is not running.
618           
619                if( this->core->lid != target->core->lid )
620                {
621                    // increment the acknowledges counter
622                    hal_atomic_add( (void*)&ack_count , 1 );
[409]623
[436]624                    // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
625                    thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]626
[436]627                    // force scheduling on target thread
628                    dev_pic_send_ipi( local_cxy , target->core->lid );
629                }
[409]630            }
[1]631        }
[172]632    }
633
[428]634    // release lock protecting process th_tbl[]
[416]635    spinlock_unlock( &process->th_lock );
636
[436]637    // wait for all scheduler acknowledges
[409]638    while( 1 )
639    {
[436]640        // exit when all scheduler acknowledges have been received
641        if ( ack_count == 0 ) break;
[409]642   
643        // wait 1000 cycles before retry
644        hal_fixed_delay( 1000 );
645    }
[1]646
[438]647#if DEBUG_PROCESS_SIGACTION
[433]648cycle = (uint32_t)hal_get_cycles();
[438]649if( DEBUG_PROCESS_SIGACTION < cycle )
[433]650printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
651__FUNCTION__ , this , process->pid , local_cxy , cycle );
652#endif
[409]653
[428]654}  // end process_block_threads()
[409]655
[440]656/////////////////////////////////////////////////
657void process_delete_threads( process_t * process,
658                             xptr_t      client_xp )
[409]659{
[433]660    thread_t          * this;          // pointer on calling thread
[440]661    thread_t          * target;        // local pointer on target thread
662    xptr_t              target_xp;     // extended pointer on target thread
663    cxy_t               owner_cxy;     // owner process cluster
[409]664    uint32_t            ltid;          // index in process th_tbl
[440]665    uint32_t            count;         // threads counter
[409]666
[433]667    // get calling thread pointer
668    this = CURRENT_THREAD;
[409]669
[440]670    // get target process owner cluster
671    owner_cxy = CXY_FROM_PID( process->pid );
672
[438]673#if DEBUG_PROCESS_SIGACTION
[433]674uint32_t cycle = (uint32_t)hal_get_cycles();
[438]675if( DEBUG_PROCESS_SIGACTION < cycle )
[433]676printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
677__FUNCTION__ , this , process->pid , local_cxy , cycle );
678#endif
679
[409]680    // get lock protecting process th_tbl[]
681    spinlock_lock( &process->th_lock );
682
[440]683    // loop on target process local threads                       
[416]684    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]685    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]686    {
[409]687        target = process->th_tbl[ltid];
[1]688
[440]689        if( target != NULL )    // valid thread 
[1]690        {
[416]691            count++;
[440]692            target_xp = XPTR( local_cxy , target );
[1]693
[440]694            // main thread and client thread should not be deleted
695            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
696                (client_xp) != target_xp )                           // not client thread
697            {
698                // mark target thread for delete and block it
699                thread_delete( target_xp , process->pid , false );   // not forced
700            }
[409]701        }
702    }
[1]703
[428]704    // release lock protecting process th_tbl[]
[416]705    spinlock_unlock( &process->th_lock );
[407]706
[438]707#if DEBUG_PROCESS_SIGACTION
[433]708cycle = (uint32_t)hal_get_cycles();
[438]709if( DEBUG_PROCESS_SIGACTION < cycle )
[433]710printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
711__FUNCTION__ , this , process->pid , local_cxy , cycle );
712#endif
[407]713
[440]714}  // end process_delete_threads()
[409]715
[440]716///////////////////////////////////////////////////
717void process_unblock_threads( process_t * process )
[409]718{
[440]719    thread_t          * target;        // pointer on target thread
720    thread_t          * this;          // pointer on calling thread
[409]721    uint32_t            ltid;          // index in process th_tbl
[440]722    uint32_t            count;         // requests counter
[409]723
[440]724    // get calling thread pointer
725    this = CURRENT_THREAD;
726
[438]727#if DEBUG_PROCESS_SIGACTION
[433]728uint32_t cycle = (uint32_t)hal_get_cycles();
[438]729if( DEBUG_PROCESS_SIGACTION < cycle )
[433]730printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
[440]731__FUNCTION__ , this , process->pid , local_cxy , cycle );
[433]732#endif
733
[416]734    // get lock protecting process th_tbl[]
735    spinlock_lock( &process->th_lock );
736
[440]737    // loop on target process local threads to unblock them all
[416]738    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]739    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]740    {
[416]741        target = process->th_tbl[ltid];
[409]742
[440]743        if( target != NULL )             // thread found
[409]744        {
745            count++;
[440]746
747            // reset the global blocked bit in target thread descriptor.
748            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]749        }
750    }
751
[428]752    // release lock protecting process th_tbl[]
[416]753    spinlock_unlock( &process->th_lock );
[407]754
[438]755#if DEBUG_PROCESS_SIGACTION
[433]756cycle = (uint32_t)hal_get_cycles();
[438]757if( DEBUG_PROCESS_SIGACTION < cycle )
[433]758printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
[440]759__FUNCTION__ , this , process->pid , local_cxy , cycle );
[433]760#endif
[1]761
[440]762}  // end process_unblock_threads()
[407]763
[1]764///////////////////////////////////////////////
765process_t * process_get_local_copy( pid_t pid )
766{
767    error_t        error;
[172]768    process_t    * process_ptr;   // local pointer on process
[23]769    xptr_t         process_xp;    // extended pointer on process
[1]770
771    cluster_t * cluster = LOCAL_CLUSTER;
772
773    // get lock protecting local list of processes
[23]774    remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]775
776    // scan the local list of process descriptors to find the process
[23]777    xptr_t  iter;
778    bool_t  found = false;
779    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]780    {
[23]781        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]782        process_ptr = GET_PTR( process_xp );
[23]783        if( process_ptr->pid == pid )
[1]784        {
785            found = true;
786            break;
787        }
788    }
789
790    // release lock protecting local list of processes
[23]791    remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]792
[172]793    // allocate memory for a new local process descriptor
[440]794    // and initialize it from the reference cluster if not found
[1]795    if( !found )
796    {
797        // get extended pointer on reference process descriptor
[23]798        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]799
[23]800        assert( (ref_xp != XPTR_NULL) , __FUNCTION__ , "illegal pid\n" );
801
[1]802        // allocate memory for local process descriptor
[23]803        process_ptr = process_alloc();
[443]804
[23]805        if( process_ptr == NULL )  return NULL;
[1]806
807        // initialize local process descriptor copy
[23]808        error = process_copy_init( process_ptr , ref_xp );
[443]809
[1]810        if( error ) return NULL;
811    }
812
[440]813#if DEBUG_PROCESS_GET_LOCAL_COPY
814uint32_t cycle = (uint32_t)hal_get_cycles();
815if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
816printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
817__FUNCTION__ , local_cxy , pid , process_ptr , cycle );
818#endif
819
[23]820    return process_ptr;
[1]821
[409]822}  // end process_get_local_copy()
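// NOTE : this on-demand replication relies on the local_list / copies_list
// registration done in process_copy_init() : a given PID can have one descriptor
// copy per cluster where it runs threads, and process_sigaction() above reaches
// all of these copies through the owner cluster copies_list.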
823
[436]824////////////////////////////////////////////
825pid_t process_get_ppid( xptr_t  process_xp )
826{
827    cxy_t       process_cxy;
828    process_t * process_ptr;
829    xptr_t      parent_xp;
830    cxy_t       parent_cxy;
831    process_t * parent_ptr;
832
833    // get process cluster and local pointer
834    process_cxy = GET_CXY( process_xp );
835    process_ptr = GET_PTR( process_xp );
836
837    // get pointers on parent process
838    parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
839    parent_cxy = GET_CXY( parent_xp );
840    parent_ptr = GET_PTR( parent_xp );
841
842    return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
843}
844
[1]845//////////////////////////////////////////////////////////////////////////////////////////
846// File descriptor array related functions
847//////////////////////////////////////////////////////////////////////////////////////////
848
849///////////////////////////////////////////
850void process_fd_init( process_t * process )
851{
852    uint32_t fd;
853
854    remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );
855
[23]856    process->fd_array.current = 0;
857
[1]858    // initialize array
[23]859    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]860    {
861        process->fd_array.array[fd] = XPTR_NULL;
862    }
863}
864
[23]865//////////////////////////////
866bool_t process_fd_array_full()
[1]867{
[172]868    // get extended pointer on reference process
[23]869    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
[1]870
[23]871    // get reference process cluster and local pointer
[435]872    process_t * ref_ptr = GET_PTR( ref_xp );
[23]873    cxy_t       ref_cxy = GET_CXY( ref_xp );
[1]874
[23]875    // get number of open file descriptors from reference fd_array
876    uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
877
[172]878        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
[1]879}
880
881/////////////////////////////////////////////////
[407]882error_t process_fd_register( process_t * process,
883                             xptr_t      file_xp,
884                             uint32_t  * fdid )
[1]885{
886    bool_t    found;
[23]887    uint32_t  id;
888    xptr_t    xp;
[1]889
[23]890    // get reference process cluster and local pointer
[407]891    xptr_t ref_xp = process->ref_xp;
[435]892    process_t * ref_ptr = GET_PTR( ref_xp );
[23]893    cxy_t       ref_cxy = GET_CXY( ref_xp );
894
895    // take lock protecting reference fd_array
896        remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
897
[1]898    found   = false;
899
[23]900    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]901    {
[23]902        xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
903        if ( xp == XPTR_NULL )
[1]904        {
905            found = true;
[23]906            hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
907                hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
[407]908                        *fdid = id;
[1]909            break;
910        }
911    }
912
[23]913    // release lock protecting reference fd_array
914        remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
[1]915
[428]916    if ( !found ) return -1;
[1]917    else          return 0;
[172]918}
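// Illustrative sketch (assumed caller, not part of this file) : a function that
// creates a new open file - typically the vfs_open() path - is expected to build
// the extended pointer on the vfs_file_t first, then register it to get the fdid :
//
//     uint32_t fdid;
//     xptr_t   file_xp = ...;    // extended pointer on the new vfs_file_t
//     if( process_fd_register( process , file_xp , &fdid ) )
//     {
//         // fd_array is full : release the file and report an error
//     }
//
// The slot is always allocated in the reference process fd_array, so all process
// descriptor copies agree on the fdid, and process_fd_get_xptr() below updates
// the local copies lazily on first access.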
[1]919
[172]920////////////////////////////////////////////////
[23]921xptr_t process_fd_get_xptr( process_t * process,
[407]922                            uint32_t    fdid )
[1]923{
[23]924    xptr_t  file_xp;
[1]925
[23]926    // access local copy of process descriptor
[407]927    file_xp = process->fd_array.array[fdid];
[1]928
[23]929    if( file_xp == XPTR_NULL )
930    {
931        // get reference process cluster and local pointer
932        xptr_t      ref_xp  = process->ref_xp;
933        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]934        process_t * ref_ptr = GET_PTR( ref_xp );
[1]935
[23]936        // access reference process descriptor
[407]937        file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]938
[23]939        // update local fd_array if found
940        if( file_xp != XPTR_NULL )
941        {
[407]942            process->fd_array.array[fdid] = file_xp;
[23]943        }
944    }
[1]945
[23]946    return file_xp;
[1]947
[407]948}  // end process_fd_get_xptr()
949
[1]950///////////////////////////////////////////
951void process_fd_remote_copy( xptr_t dst_xp,
952                             xptr_t src_xp )
953{
954    uint32_t fd;
955    xptr_t   entry;
956
957    // get cluster and local pointer for src fd_array
958    cxy_t        src_cxy = GET_CXY( src_xp );
[435]959    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]960
961    // get cluster and local pointer for dst fd_array
962    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]963    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]964
965    // get the remote lock protecting the src fd_array
966        remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
967
[428]968    // loop on all fd_array entries
969    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]970        {
971                entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
972
973                if( entry != XPTR_NULL )
974                {
975            // increment file descriptor ref count
976            vfs_file_count_up( entry );
977
978                        // copy entry in destination process fd_array
979                        hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
980                }
981        }
982
983    // release lock on source process fd_array
984        remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
985
[407]986}  // end process_fd_remote_copy()
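// NOTE : this is how a user process created by process_reference_init() inherits
// the open files of its model : each non-null entry is copied and the underlying
// vfs_file reference count is incremented, so the open files are shared by
// reference, not duplicated.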
987
[1]988////////////////////////////////////////////////////////////////////////////////////
989//  Thread related functions
990////////////////////////////////////////////////////////////////////////////////////
991
992/////////////////////////////////////////////////////
993error_t process_register_thread( process_t * process,
994                                 thread_t  * thread,
995                                 trdid_t   * trdid )
996{
997    ltid_t   ltid;
[428]998    bool_t   found = false;
[1]999
[14]1000    assert( (process != NULL) , __FUNCTION__ , "process argument is NULL" );
[1]1001
[14]1002    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
1003
[428]1004    // take lock protecting th_tbl
1005    spinlock_lock( &process->th_lock );
1006
[407]1007    // search a free slot in th_tbl[]
[428]1008    for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
[1]1009    {
1010        if( process->th_tbl[ltid] == NULL )
1011        {
1012            found = true;
1013            break;
1014        }
1015    }
1016
1017    if( found )
1018    {
1019        // register thread in th_tbl[]
1020        process->th_tbl[ltid] = thread;
1021        process->th_nr++;
1022
1023        // returns trdid
1024        *trdid = TRDID( local_cxy , ltid );
1025    }
1026
[428]1027    // release lock protecting th_tbl
1028    hal_fence();
1029    spinlock_unlock( &process->th_lock );
1030
[1]1031    return (found) ? 0 : ENOMEM;
[204]1032
1033}  // end process_register_thread()
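// NOTE : as used above, the TRDID() macro packs the cluster identifier and the
// local index <ltid> into a single trdid, and LTID_FROM_TRDID() (used for
// instance in process_remove_thread() below) extracts the <ltid> back ; the
// exact bit layout is presumably defined in the thread / hal headers.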
1034
[443]1035/////////////////////////////////////////////////
1036bool_t process_remove_thread( thread_t * thread )
[1]1037{
[443]1038    uint32_t count;  // number of threads in local process descriptor
1039
[373]1040    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
[172]1041
[1]1042    process_t * process = thread->process;
1043
1044    // get thread local index
1045    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1046
[428]1047    // take lock protecting th_tbl
1048    spinlock_lock( &process->th_lock );
1049
[443]1050    count = process->th_nr;
[428]1051
[443]1052    assert( (count > 0) , __FUNCTION__ , "process th_nr cannot be 0\n" );
1053
[1]1054    // remove thread from th_tbl[]
1055    process->th_tbl[ltid] = NULL;
1056    process->th_nr--;
1057
[443]1058    // release lock protecting th_tbl
[428]1059    hal_fence();
1060    spinlock_unlock( &process->th_lock );
1061
[443]1062    return (count == 1);
1063
[204]1064}  // process_remove_thread()
1065
[408]1066/////////////////////////////////////////////////////////
1067error_t process_make_fork( xptr_t      parent_process_xp,
1068                           xptr_t      parent_thread_xp,
1069                           pid_t     * child_pid,
1070                           thread_t ** child_thread )
[1]1071{
[408]1072    process_t * process;         // local pointer on child process descriptor
1073    thread_t  * thread;          // local pointer on child thread descriptor
1074    pid_t       new_pid;         // process identifier for child process
1075    pid_t       parent_pid;      // process identifier for parent process
1076    xptr_t      ref_xp;          // extended pointer on reference process
[428]1077    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1078    error_t     error;
[1]1079
[408]1080    // get cluster and local pointer for parent process
1081    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1082    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1083
[428]1084    // get parent process PID and extended pointer on .elf file
1085    parent_pid = hal_remote_lw (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1086    vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1087
[438]1088    // check parent process is the reference process
[408]1089    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1090
[408]1091    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
1092    "parent process must be the reference process\n" );
[407]1093
[438]1094#if DEBUG_PROCESS_MAKE_FORK
[433]1095uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1096if( DEBUG_PROCESS_MAKE_FORK < cycle )
1097printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n",
1098__FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle );
[433]1099#endif
[172]1100
[408]1101    // allocate a process descriptor
1102    process = process_alloc();
1103    if( process == NULL )
1104    {
1105        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1106        __FUNCTION__, local_cxy ); 
1107        return -1;
1108    }
[1]1109
[408]1110    // allocate a child PID from local cluster
[416]1111    error = cluster_pid_alloc( process , &new_pid );
[428]1112    if( error ) 
[1]1113    {
[408]1114        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1115        __FUNCTION__, local_cxy ); 
1116        process_free( process );
1117        return -1;
[1]1118    }
[408]1119
1120    // initializes child process descriptor from parent process descriptor
1121    process_reference_init( process,
1122                            new_pid,
[428]1123                            parent_process_xp,
[408]1124                            parent_process_xp );
1125
[438]1126#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1127cycle = (uint32_t)hal_get_cycles();
[438]1128if( DEBUG_PROCESS_MAKE_FORK < cycle )
[433]1129printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n",
1130__FUNCTION__, CURRENT_THREAD, process, new_pid, cycle );
1131#endif
[408]1132
1133    // copy VMM from parent descriptor to child descriptor
1134    error = vmm_fork_copy( process,
1135                           parent_process_xp );
1136    if( error )
[101]1137    {
[408]1138        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1139        __FUNCTION__, local_cxy ); 
1140        process_free( process );
1141        cluster_pid_release( new_pid );
1142        return -1;
[101]1143    }
[172]1144
[438]1145#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1146cycle = (uint32_t)hal_get_cycles();
[438]1147if( DEBUG_PROCESS_MAKE_FORK < cycle )
[433]1148printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n",
1149__FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle );
1150#endif
[407]1151
[428]1152    // update extended pointer on .elf file
1153    process->vfs_bin_xp = vfs_bin_xp;
1154
[408]1155    // create child thread descriptor from parent thread descriptor
1156    error = thread_user_fork( parent_thread_xp,
1157                              process,
1158                              &thread );
1159    if( error )
1160    {
1161        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1162        __FUNCTION__, local_cxy ); 
1163        process_free( process );
1164        cluster_pid_release( new_pid );
1165        return -1;
1166    }
[172]1167
[438]1168    // check main thread LTID
1169    assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ ,
1170    "main thread must have LTID == 0\n" );
[428]1171
[438]1172#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1173cycle = (uint32_t)hal_get_cycles();
[438]1174if( DEBUG_PROCESS_MAKE_FORK < cycle )
[441]1175printk("\n[DBG] %s : thread %x created child thread %x on core[%x,%d] / cycle %d\n", 
1176__FUNCTION__ , CURRENT_THREAD, thread, local_cxy, thread->core->lid, cycle );
[433]1177#endif
[1]1178
[433]1179    // set Copy_On_Write flag in parent process GPT
[408]1180    // this includes all replicated GPT copies
1181    if( parent_process_cxy == local_cxy )   // reference is local
1182    {
1183        vmm_set_cow( parent_process_ptr );
1184    }
1185    else                                    // reference is remote
1186    {
1187        rpc_vmm_set_cow_client( parent_process_cxy,
1188                                parent_process_ptr );
1189    }
[1]1190
[433]1191    // set Copy_On_Write flag in child process GPT
1192    vmm_set_cow( process );
1193 
[438]1194#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1195cycle = (uint32_t)hal_get_cycles();
[438]1196if( DEBUG_PROCESS_MAKE_FORK < cycle )
[433]1197printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n",
1198__FUNCTION__ , CURRENT_THREAD, cycle );
1199#endif
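// NOTE : at this point both the parent and the child GPTs map the user vsegs in
// Copy-On-Write mode ; the actual physical page duplication is presumably
// deferred to the VMM page-fault path, triggered by the first write access of
// either process, which is what makes this fork lazy.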
[101]1200
[428]1201    // get extended pointers on parent children_root, children_lock and children_nr
1202    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1203    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1204    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1205
[428]1206    // register process in parent children list
1207    remote_spinlock_lock( children_lock_xp );
1208        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1209        hal_remote_atomic_add( children_nr_xp , 1 );
1210    remote_spinlock_unlock( children_lock_xp );
[204]1211
[408]1212    // return success
1213    *child_thread = thread;
1214    *child_pid    = new_pid;
[1]1215
[438]1216#if DEBUG_PROCESS_MAKE_FORK
[433]1217cycle = (uint32_t)hal_get_cycles();
[438]1218if( DEBUG_PROCESS_MAKE_FORK < cycle )
[433]1219printk("\n[DBG] %s : thread %x exit / cycle %d\n",
1220__FUNCTION__, CURRENT_THREAD, cycle );
1221#endif
[428]1222
[408]1223    return 0;
1224
[416]1225}   // end process_make_fork()
[408]1226
[409]1227
[408]1228/////////////////////////////////////////////////////
1229error_t process_make_exec( exec_info_t  * exec_info )
1230{
1231    char           * path;                    // pathname to .elf file
[441]1232    pid_t            pid;                     // old_process PID, given to new_process
[433]1233    pid_t            temp_pid;                // temporary PID / given to old_process
[416]1234    process_t      * old_process;             // local pointer on old process
[433]1235    thread_t       * old_thread;              // local pointer on old thread
[416]1236    process_t      * new_process;             // local pointer on new process
[433]1237    thread_t       * new_thread;              // local pointer on new thread
1238    xptr_t           parent_xp;               // extended pointer on parent process
1239    pthread_attr_t   attr;                    // new thread attributes
[408]1240    lid_t            lid;                     // selected core local index
[441]1241        error_t          error;                   // value returned by called functions
1242   
[433]1243    // get old_thread / old_process / PID / parent_xp
1244    old_thread  = CURRENT_THREAD;
1245    old_process = old_thread->process;
1246    pid         = old_process->pid;
1247    parent_xp   = old_process->parent_xp;
1248   
1249        // get .elf pathname from exec_info
[441]1250        path        = exec_info->path;
[408]1251
[416]1252    // this function must be executed by a thread running in owner cluster
[428]1253    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
[433]1254    "local_cluster must be owner_cluster\n" );
[408]1255
[433]1256    assert( (LTID_FROM_TRDID( old_thread->trdid ) == 0) , __FUNCTION__,
1257    "must be called by the main thread\n" );
1258 
[438]1259#if DEBUG_PROCESS_MAKE_EXEC
[433]1260uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1261if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[433]1262printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n",
1263__FUNCTION__, old_thread, pid, path, cycle );
1264#endif
[408]1265
[428]1266     // allocate memory for new_process descriptor
[416]1267    new_process = process_alloc();
[408]1268
[416]1269    if( new_process == NULL )
1270    {
[441]1271        printk("\n[ERROR] in %s : cannot allocate process for %s\n", __FUNCTION__ , path );
[416]1272        return -1;
1273    }
1274
[433]1275    // get a temporary PID for old_process
[428]1276    error = cluster_pid_alloc( old_process , &temp_pid );
1277    if( error ) 
[416]1278    {
[428]1279        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1280        __FUNCTION__ , local_cxy ); 
1281        process_free( new_process );
[416]1282        return -1;
1283    }
1284
[433]1285    // set temporary PID to old_process
[428]1286    old_process->pid = temp_pid;
1287
[408]1288    // initialize new process descriptor
[416]1289    process_reference_init( new_process,
[428]1290                            pid,
[433]1291                            parent_xp,                          // parent_process_xp
1292                            XPTR(local_cxy , old_process) );    // model_process
[408]1293
[428]1294    // give TXT ownership to new_process
[436]1295    process_txt_set_ownership( XPTR( local_cxy , new_process) );
[408]1296
[438]1297#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1298cycle = (uint32_t)hal_get_cycles();
[438]1299if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[433]1300printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n",
1301__FUNCTION__ , old_thread , new_process , cycle );
1302#endif
[428]1303
1304    // register code & data vsegs as well as entry-point in new process VMM,
1305    // and register extended pointer on .elf file in process descriptor
[441]1306        error = elf_load_process( path , new_process );
1307
1308    if( error )
[1]1309        {
[441]1310                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
1311        process_txt_set_ownership( XPTR( local_cxy , old_process) );
1312        process_txt_detach( XPTR( local_cxy , new_process) );
[416]1313        process_destroy( new_process );
[441]1314        old_process->pid = pid;
[408]1315        return -1;
[1]1316        }
1317
[438]1318#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1319cycle = (uint32_t)hal_get_cycles();
[438]1320if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[433]1321printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n",
1322__FUNCTION__, old_thread , new_process->pid , cycle );
1323#endif
[1]1324
[408]1325    // select a core in local cluster to execute the main thread
[1]1326    lid  = cluster_select_local_core();
1327
1328    // initialize pthread attributes for main thread
[23]1329    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1330    attr.cxy        = local_cxy;
1331    attr.lid        = lid;
[1]1332
[428]1333    // create and initialize main thread in local cluster
1334        error = thread_user_create( pid,
[416]1335                                (void *)new_process->vmm.entry_point,
[23]1336                                exec_info->args_pointers,
[1]1337                                &attr,
[416]1338                                &new_thread );
[1]1339        if( error )
1340        {
[441]1341                printk("\n[ERROR] in %s : cannot create thread for %s\n", __FUNCTION__ , path );
1342        process_txt_set_ownership( XPTR( local_cxy , old_process) );
1343        process_txt_detach( XPTR( local_cxy , new_process) );
[416]1344        process_destroy( new_process );
[441]1345        old_process->pid = pid;
[408]1346        return -1;
[1]1347        }
1348
[438]1349    // check main thread LTID
1350    assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ ,
1351    "main thread must have LTID == 0\n" );
[204]1352
[438]1353#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1354cycle = (uint32_t)hal_get_cycles();
[438]1355if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[433]1356printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n",
1357__FUNCTION__ , old_thread , new_thread , cycle );
1358#endif
[101]1359
[433]1360    // get cluster and local pointer on parent process
[428]1361    process_t * parent_ptr = GET_PTR( parent_xp );
1362    cxy_t       parent_cxy = GET_CXY( parent_xp );
[408]1363
[428]1364    // get extended pointers on parent children_root, children_lock and children_nr
1365    xptr_t root_xp = XPTR( parent_cxy , &parent_ptr->children_root );
1366    xptr_t lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
1367    xptr_t nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr   );
[416]1368
[428]1369    // register new_process in parent children list
1370    remote_spinlock_lock( lock_xp );
1371        xlist_add_last( root_xp , XPTR( local_cxy , &new_process->children_list ) );
1372        hal_remote_atomic_add( nr_xp , 1 );
1373    remote_spinlock_unlock( lock_xp );
[416]1374
[172]1375    // activate new thread
[416]1376        thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
[1]1377
[436]1378    // detach old_process from TXT
1379    process_txt_detach( XPTR( local_cxy , old_process ) );
1380
[445]1381    // block this old_thread
[436]1382    thread_block( XPTR( local_cxy , old_thread ) , THREAD_BLOCKED_GLOBAL );
[433]1383
[445]1384    // atomically update old_process descriptor term_state to ask
1385    // the parent process (wait() function) to delete this old_thread
1386    hal_atomic_or( &old_process->term_state , PROCESS_TERM_EXIT );
1387
[428]1388    hal_fence();
[204]1389
[438]1390#if DEBUG_PROCESS_MAKE_EXEC
[433]1391cycle = (uint32_t)hal_get_cycles();
[438]1392if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[441]1393printk("\n[DBG] %s : old thread %x blocked for delete / new thread %x activated / cycle %d\n",
[433]1394__FUNCTION__ , old_thread , new_thread , cycle );
1395#endif
1396   
[409]1397        return 0;
1398
1399}  // end process_make_exec()
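/*
 * Illustrative sketch (guarded out of the build): the remote child-registration
 * pattern used by process_make_exec() above and process_init_create() below,
 * factored as a stand-alone helper. The helper name example_register_child is
 * an assumption made only for this sketch; the kernel itself inlines the pattern.
 */
#if 0
static void example_register_child( xptr_t      parent_xp,
                                    process_t * child )
{
    cxy_t       parent_cxy = GET_CXY( parent_xp );
    process_t * parent_ptr = GET_PTR( parent_xp );

    // build extended pointers on the parent children list root, lock, and counter
    xptr_t root_xp = XPTR( parent_cxy , &parent_ptr->children_root );
    xptr_t lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
    xptr_t nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr   );

    // the list element lives in the local child, the root in the (possibly remote) parent
    remote_spinlock_lock( lock_xp );
    xlist_add_last( root_xp , XPTR( local_cxy , &child->children_list ) );
    hal_remote_atomic_add( nr_xp , 1 );
    remote_spinlock_unlock( lock_xp );
}
#endif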
1400
[428]1401///////////////////////////////////////////////
1402void process_zero_create( process_t * process )
1403{
1404
[438]1405#if DEBUG_PROCESS_ZERO_CREATE
[433]1406uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1407if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[433]1408printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
1409#endif
[428]1410
[428]1411    // initialize PID, REF_XP, OWNER_XP, PARENT_XP, and STATE
[433]1412    process->pid        = 0;
1413    process->ref_xp     = XPTR( local_cxy , process );
[443]1414    process->owner_xp   = XPTR( local_cxy , process );
[433]1415    process->parent_xp  = XPTR_NULL;
1416    process->term_state = 0;
[428]1417
1418    // reset th_tbl[] array as empty
1419    uint32_t i;
1420    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
1421        {
1422        process->th_tbl[i] = NULL;
1423    }
1424    process->th_nr  = 0;
1425    spinlock_init( &process->th_lock );
1426
1427    // reset children list as empty
1428    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1429    remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );
1430    process->children_nr = 0;
1431
1432        hal_fence();
1433
[438]1434#if DEBUG_PROCESS_ZERO_CREATE
[433]1435cycle = (uint32_t)hal_get_cycles();
[438]1436if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[433]1437printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
1438#endif
[428]1439
1440}  // end process_zero_create()
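/*
 * Illustrative sketch (guarded out of the build): how a free slot of the
 * th_tbl[] array initialised above can be searched under th_lock. The helper
 * name example_free_slot is an assumption made only for this sketch; the real
 * thread-registration code lives elsewhere in this file.
 */
#if 0
static uint32_t example_free_slot( process_t * process )
{
    uint32_t ltid;

    spinlock_lock( &process->th_lock );
    for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
    {
        if( process->th_tbl[ltid] == NULL ) break;
    }
    spinlock_unlock( &process->th_lock );

    // returns CONFIG_THREAD_MAX_PER_CLUSTER when the table is full
    return ltid;
}
#endif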
1441
[1]1442//////////////////////////
1443void process_init_create()
1444{
[428]1445    process_t      * process;       // local pointer on process descriptor
[409]1446    pid_t            pid;           // process_init identifier
1447    thread_t       * thread;        // local pointer on main thread
1448    pthread_attr_t   attr;          // main thread attributes
1449    lid_t            lid;           // selected core local index for main thread
1450    error_t          error;
[1]1451
[438]1452#if DEBUG_PROCESS_INIT_CREATE
[433]1453uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1454if( DEBUG_PROCESS_INIT_CREATE < cycle )
[433]1455printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
1456#endif
[1]1457
[408]1458    // allocates memory for process descriptor from local cluster
1459        process = process_alloc(); 
1460        if( process == NULL )
1461    {
1462                printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
[409]1463                __FUNCTION__, local_cxy  );
[408]1464    }
[101]1465
[409]1466    // get PID from local cluster
[416]1467    error = cluster_pid_alloc( process , &pid );
[408]1468    if( error )
1469    {
1470                printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
1471                __FUNCTION__, local_cxy );
[428]1472        process_free( process );
[408]1473    }
1474
[428]1475    // check allocated PID
1476    assert( (pid == 1) , __FUNCTION__ , "process INIT must be first process in cluster 0\n" );
[409]1477
1478    // initialize process descriptor / parent is local process_zero
1479    process_reference_init( process,
[408]1480                            pid,
[428]1481                            XPTR( local_cxy , &process_zero ),     // parent
1482                            XPTR( local_cxy , &process_zero ) );   // model
[408]1483
[409]1484    // register "code" and "data" vsegs as well as entry-point
1485    // in process VMM, using information contained in the elf file.
1486        if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
1487        {
1488                printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
1489                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
1490        process_destroy( process );
1491        }
[101]1492
[428]1493    // get extended pointers on process_zero children_root, children_lock
1494    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1495    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1496
1497    // register process INIT in parent local process_zero
1498    remote_spinlock_lock( children_lock_xp );
1499        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1500        hal_atomic_add( &process_zero.children_nr , 1 );
1501    remote_spinlock_unlock( children_lock_xp );
1502
[409]1503    // select a core in local cluster to execute the main thread
1504    lid  = cluster_select_local_core();
1505
1506    // initialize pthread attributes for main thread
1507    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1508    attr.cxy        = local_cxy;
1509    attr.lid        = lid;
1510
1511    // create and initialize thread descriptor
1512        error = thread_user_create( pid,
1513                                (void *)process->vmm.entry_point,
1514                                NULL,
1515                                &attr,
1516                                &thread );
[408]1517        if( error )
[409]1518        {
1519                printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
1520                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
1521        process_destroy( process );
1522        }
[1]1523
[428]1524    // check main thread index
1525    assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
1526
[409]1527    // activate thread
1528        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1529
[124]1530    hal_fence();
[1]1531
[438]1532#if DEBUG_PROCESS_INIT_CREATE
[433]1533cycle = (uint32_t)hal_get_cycles();
[438]1534if( DEBUG_PROCESS_INIT_CREATE < cycle )
[433]1535printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
1536#endif
[409]1537
[204]1538}  // end process_init_create()
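/*
 * Illustrative sketch (guarded out of the build): the core-pinning pattern
 * shared by process_init_create() and process_make_exec() above. The attribute
 * flags force the main thread to run on an explicitly selected core of the
 * local cluster. The helper name is an assumption made only for this sketch.
 */
#if 0
static void example_pin_main_thread_attr( pthread_attr_t * attr )
{
    // select a core in the local cluster, then pin the thread on it
    lid_t lid = cluster_select_local_core();

    attr->attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr->cxy        = local_cxy;
    attr->lid        = lid;
}
#endif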
1539
[428]1540/////////////////////////////////////////
1541void process_display( xptr_t process_xp )
1542{
1543    process_t   * process_ptr;
1544    cxy_t         process_cxy;
[443]1545
[428]1546    xptr_t        parent_xp;       // extended pointer on parent process
1547    process_t   * parent_ptr;
1548    cxy_t         parent_cxy;
1549
[443]1550    xptr_t        owner_xp;        // extended pointer on owner process
1551    process_t   * owner_ptr;
1552    cxy_t         owner_cxy;
1553
[428]1554    pid_t         pid;
1555    pid_t         ppid;
1556    uint32_t      state;
1557    uint32_t      th_nr;
1558
[443]1559    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1560    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1561    chdev_t     * txt_chdev_ptr;
1562    cxy_t         txt_chdev_cxy;
1563    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1564
1565    xptr_t        elf_file_xp;     // extended pointer on .elf file
1566    cxy_t         elf_file_cxy;
1567    vfs_file_t  * elf_file_ptr;
1568    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1569
1570    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1571    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1572
1573    // get cluster and local pointer on process
1574    process_ptr = GET_PTR( process_xp );
1575    process_cxy = GET_CXY( process_xp );
1576
1577    // get PID and state
1578    pid   = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
[433]1579    state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1580
1581    // get PPID
1582    parent_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
1583    parent_cxy = GET_CXY( parent_xp );
1584    parent_ptr = GET_PTR( parent_xp );
1585    ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
1586
1587    // get number of threads
1588    th_nr      = hal_remote_lw( XPTR( process_cxy , &process_ptr->th_nr ) );
1589
[443]1590    // get pointers on owner process descriptor
1591    owner_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->owner_xp ) );
1592    owner_cxy = GET_CXY( owner_xp );
1593    owner_ptr = GET_PTR( owner_xp );
[428]1594
[443]1595    // get extended pointer on TXT_RX file descriptor attached to process
1596    txt_file_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
1597
[428]1598    assert( (txt_file_xp != XPTR_NULL) , __FUNCTION__ , 
1599    "process must be attached to one TXT terminal\n" ); 
1600
[443]1601    // get TXT_RX chdev pointers
1602    txt_chdev_xp  = chdev_from_file( txt_file_xp );
1603    txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1604    txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1605
1606    // get TXT_RX name and ownership
[428]1607    hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
[443]1608                       XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1609   
[443]1610    txt_owner_xp = (xptr_t)hal_remote_lwd( XPTR( txt_chdev_cxy, 
1611                                                 &txt_chdev_ptr->ext.txt.owner_xp ) );
1612   
[428]1613    // get process .elf name
1614    elf_file_xp   = hal_remote_lwd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1615    elf_file_cxy  = GET_CXY( elf_file_xp );
1616    elf_file_ptr  = (vfs_file_t *)GET_PTR( elf_file_xp );
1617    elf_inode_ptr = (vfs_inode_t *)hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1618    vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1619
1620    // display process info
[443]1621    if( txt_owner_xp == process_xp )
[428]1622    {
[443]1623        nolock_printk("PID %X | PPID %X | STS %X | %s (FG) | %X | %d | %s\n", 
[433]1624        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
[428]1625    }
1626    else
1627    {
[443]1628        nolock_printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n", 
[433]1629        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
[428]1630    }
1631}  // end process_display()
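/*
 * Illustrative sketch (guarded out of the build): process_display() prints with
 * nolock_printk(), so the caller is expected to hold the TXT0 lock, exactly as
 * process_txt_display() does below. The process_xp argument is a hypothetical
 * target process, assumed to be attached to a TXT terminal.
 */
#if 0
static void example_display_one( xptr_t process_xp )
{
    // get pointers on the TXT0 transmit chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // take the TXT0 lock in busy-waiting mode, display, release
    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    reg_t  save_sr;

    remote_spinlock_lock_busy( lock_xp , &save_sr );
    process_display( process_xp );
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}
#endif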
1632
1633
1634////////////////////////////////////////////////////////////////////////////////////////
1635//     Terminals related functions
1636////////////////////////////////////////////////////////////////////////////////////////
1637
1638////////////////////////////
1639uint32_t process_txt_alloc()
1640{
1641    uint32_t  index;       // TXT terminal index
1642    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1643    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1644    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1645    xptr_t    root_xp;     // extended pointer on owner field in chdev
1646
1647    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1648    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1649    {
1650        // get pointers on TXT_RX[index]
1651        chdev_xp  = chdev_dir.txt_rx[index];
1652        chdev_cxy = GET_CXY( chdev_xp );
1653        chdev_ptr = GET_PTR( chdev_xp );
1654
1655        // get extended pointer on root of attached process
1656        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1657
1658        // return free TXT index if found
1659        if( xlist_is_empty( root_xp ) ) return index; 
1660    }
1661
1662    assert( false , __FUNCTION__ , "no free TXT terminal found" );
1663
1664    return -1;
1665
1666} // end process_txt_alloc()
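/*
 * Illustrative sketch (guarded out of the build): a plausible pairing of the
 * TXT primitives when a new interactive process is created in its owner
 * cluster. This pairing is only an assumption for the sketch; the actual call
 * sites are in the process creation path and are not reproduced here.
 */
#if 0
static void example_attach_to_free_txt( process_t * process )
{
    // pick a free user terminal, attach the process, and give it the foreground
    uint32_t txt_id = process_txt_alloc();

    process_txt_attach( process , txt_id );
    process_txt_set_ownership( XPTR( local_cxy , process ) );
}
#endif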
1667
1668/////////////////////////////////////////////
1669void process_txt_attach( process_t * process,
1670                         uint32_t    txt_id )
1671{
1672    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1673    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1674    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1675    xptr_t      root_xp;      // extended pointer on list root in chdev
1676    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1677
[438]1678#if DEBUG_PROCESS_TXT_ATTACH
[433]1679uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1680if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[433]1681printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d  / cycle %d\n",
[436]1682__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
[433]1683#endif
[428]1684
[436]1685    // check process is in owner cluster
1686    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
1687    "process descriptor not in owner cluster" );
[428]1688
1689    // check terminal index
1690    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1691    __FUNCTION__ , "illegal TXT terminal index" );
1692
1693    // get pointers on TXT_RX[txt_id] chdev
1694    chdev_xp  = chdev_dir.txt_rx[txt_id];
1695    chdev_cxy = GET_CXY( chdev_xp );
1696    chdev_ptr = GET_PTR( chdev_xp );
1697
1698    // get extended pointer on root & lock of attached process list
1699    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1700    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1701
1702    // insert process in attached process list
1703    remote_spinlock_lock( lock_xp );
1704    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1705    remote_spinlock_unlock( lock_xp );
1706
[438]1707#if DEBUG_PROCESS_TXT_ATTACH
[433]1708cycle = (uint32_t)hal_get_cycles();
[438]1709if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[433]1710printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
[436]1711__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
[433]1712#endif
[428]1713
1714} // end process_txt_attach()
1715
[436]1716/////////////////////////////////////////////
1717void process_txt_detach( xptr_t  process_xp )
[428]1718{
[436]1719    process_t * process_ptr;  // local pointer on process in owner cluster
1720    cxy_t       process_cxy;  // process owner cluster
1721    pid_t       process_pid;  // process identifier
1722    xptr_t      file_xp;      // extended pointer on stdin file
[428]1723    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1724    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1725    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1726    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1727
[436]1728    // get process cluster, local pointer, and PID
1729    process_cxy = GET_CXY( process_xp );
1730    process_ptr = GET_PTR( process_xp );
1731    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
1732
1733    // check process descriptor in owner cluster
1734    assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , __FUNCTION__ ,
1735    "process descriptor not in owner cluster" );
1736
[438]1737#if DEBUG_PROCESS_TXT_ATTACH
[433]1738uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1739if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[433]1740printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
[436]1741__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
[433]1742#endif
[428]1743
[436]1744    // release TXT ownership (does nothing if not TXT owner)
1745    process_txt_transfer_ownership( process_xp );
[428]1746
[436]1747    // get extended pointer on process stdin file
1748    file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
1749
1750    // get pointers on TXT_RX chdev
1751    chdev_xp  = chdev_from_file( file_xp );
[428]1752    chdev_cxy = GET_CXY( chdev_xp );
1753    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1754
[436]1755    // get extended pointer on lock protecting attached process list
[428]1756    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1757
1758    // unlink process from attached process list
1759    remote_spinlock_lock( lock_xp );
[436]1760    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
[428]1761    remote_spinlock_unlock( lock_xp );
[436]1762
[438]1763#if DEBUG_PROCESS_TXT_ATTACH
[441]1764cycle  = (uint32_t)hal_get_cycles();
1765uint32_t txt_id = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[438]1766if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[441]1767printk("\n[DBG] %s : thread %x exit / process %x detached from TXT %d / cycle %d\n",
1768__FUNCTION__, CURRENT_THREAD, process_pid, txt_id, cycle );
[433]1769#endif
[428]1770
1771} // end process_txt_detach()
1772
1773///////////////////////////////////////////////////
1774void process_txt_set_ownership( xptr_t process_xp )
1775{
1776    process_t * process_ptr;
1777    cxy_t       process_cxy;
[436]1778    pid_t       process_pid;
[428]1779    xptr_t      file_xp;
1780    xptr_t      txt_xp;     
1781    chdev_t   * txt_ptr;
1782    cxy_t       txt_cxy;
1783
[436]1784    // get pointers on process in owner cluster
[428]1785    process_cxy = GET_CXY( process_xp );
[435]1786    process_ptr = GET_PTR( process_xp );
[428]1787
[436]1788    // get process PID
1789    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
1790
1791    // check owner cluster
1792    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
1793    "process descriptor not in owner cluster\n" );
1794
[438]1795#if DEBUG_PROCESS_TXT_ATTACH
[436]1796uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1797if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1798printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
1799__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
1800#endif
1801
[428]1802    // get extended pointer on stdin pseudo file
1803    file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
1804
1805    // get pointers on TXT chdev
1806    txt_xp  = chdev_from_file( file_xp );
1807    txt_cxy = GET_CXY( txt_xp );
[435]1808    txt_ptr = GET_PTR( txt_xp );
[428]1809
1810    // set owner field in TXT chdev
1811    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
1812
[438]1813#if DEBUG_PROCESS_TXT_ATTACH
[436]1814cycle = (uint32_t)hal_get_cycles();
[438]1815if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1816printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
1817__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
1818#endif
1819
[428]1820}  // end process_txt_set_ownership()
1821
[436]1822////////////////////////////////////////////////////////
1823void process_txt_transfer_ownership( xptr_t process_xp )
[428]1824{
[436]1825    process_t * process_ptr;     // local pointer on process releasing ownership
1826    cxy_t       process_cxy;     // process cluster
1827    pid_t       process_pid;     // process identifier
[428]1828    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
1829    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]1830    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
1831    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
1832    uint32_t    txt_id;          // TXT_RX channel
[428]1833    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
1834    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]1835    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]1836    xptr_t      iter_xp;         // iterator for xlist
1837    xptr_t      current_xp;      // extended pointer on current process
[433]1838    process_t * current_ptr;     // local pointer on current process
1839    cxy_t       current_cxy;     // cluster for current process
[428]1840
[436]1841    // get pointers on process in owner cluster
[428]1842    process_cxy = GET_CXY( process_xp );
[435]1843    process_ptr = GET_PTR( process_xp );
[428]1844
[436]1845    // get process PID
1846    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
1847
1848    // check owner cluster
1849    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
1850    "process descriptor not in owner cluster\n" );
1851
[438]1852#if DEBUG_PROCESS_TXT_ATTACH
[436]1853uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1854if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[441]1855printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
1856__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
[436]1857#endif
1858
[428]1859    // get extended pointer on stdin pseudo file
1860    file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
1861
1862    // get pointers on TXT chdev
1863    txt_xp  = chdev_from_file( file_xp );
1864    txt_cxy = GET_CXY( txt_xp );
[433]1865    txt_ptr = GET_PTR( txt_xp );
[428]1866
[433]1867    // get extended pointer on TXT_RX owner and TXT channel
[428]1868    owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[433]1869    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]1870
[438]1871#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
1872if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1873printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
1874__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
1875#endif
1876
1877    // transfer ownership only if process is the TXT owner
1878    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]1879    {
[436]1880        // get extended pointers on root and lock of attached processes list
1881        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
1882        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]1883
[436]1884        // get lock
1885        remote_spinlock_lock( lock_xp );
1886
1887        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]1888        {
1889
[438]1890#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
1891if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1892printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
1893#endif
1894            // scan attached process list to find KSH process
1895            XLIST_FOREACH( root_xp , iter_xp )
1896            {
1897                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
1898                current_cxy = GET_CXY( current_xp );
1899                current_ptr = GET_PTR( current_xp );
[435]1900
[436]1901                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
1902                {
1903                    // release lock
1904                    remote_spinlock_unlock( lock_xp );
1905
1906                    // set owner field in TXT chdev
1907                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
1908
[438]1909#if DEBUG_PROCESS_TXT_ATTACH
[436]1910cycle = (uint32_t)hal_get_cycles();
[438]1911if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1912printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
1913__FUNCTION__, CURRENT_THREAD, process_pid, 
1914hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
1915#endif
1916                     return;
1917                }
1918            }
1919 
1920            // release lock
1921            remote_spinlock_unlock( lock_xp );
1922
1923            // PANIC if KSH not found
1924            assert( false , __FUNCTION__ , "KSH process not found for TXT terminal" ); 
1925
1926            return;
1927        }
1928        else                                               // process is KSH
1929        {
1930
[438]1931#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
1932if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1933printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
1934#endif
1935
1936            // scan attached process list to find another process
1937            XLIST_FOREACH( root_xp , iter_xp )
[428]1938            {
[436]1939                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
1940                current_cxy = GET_CXY( current_xp );
1941                current_ptr = GET_PTR( current_xp );
1942
1943                if( current_xp != process_xp )            // current is not KSH
1944                {
1945                    // release lock
1946                    remote_spinlock_unlock( lock_xp );
1947
1948                    // set owner field in TXT chdev
1949                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
1950
[438]1951#if DEBUG_PROCESS_TXT_ATTACH
[436]1952cycle = (uint32_t)hal_get_cycles();
[438]1953if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1954printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
1955__FUNCTION__, CURRENT_THREAD, process_pid,
1956hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
1957#endif
1958                     return;
1959                }
[428]1960            }
[436]1961
1962            // release lock
1963            remote_spinlock_unlock( lock_xp );
1964
1965            // no more owner for TXT if no other process found
1966            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
1967
[438]1968#if DEBUG_PROCESS_TXT_ATTACH
[436]1969cycle = (uint32_t)hal_get_cycles();
[438]1970if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1971printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
1972__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
1973#endif
1974            return;
[428]1975        }
[436]1976    }
1977    else
1978    {
[433]1979
[438]1980#if DEBUG_PROCESS_TXT_ATTACH
[436]1981cycle = (uint32_t)hal_get_cycles();
[438]1982if( DEBUG_PROCESS_TXT_ATTACH < cycle )
[436]1983printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
1984__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
1985#endif
1986
[428]1987    }
[436]1988}  // end process_txt_transfer_ownership()
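/*
 * Illustrative sketch (guarded out of the build): the convention used above is
 * that the KSH process attached to a terminal is the attached process whose
 * parent is the INIT process (PID 1), hence the process_get_ppid() == 1 tests.
 * The helper name example_is_ksh is an assumption made only for this sketch.
 */
#if 0
static bool_t example_is_ksh( xptr_t process_xp )
{
    // a process is considered a KSH if its parent is the INIT process
    return ( process_get_ppid( process_xp ) == 1 );
}
#endif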
[428]1989
1990
[436]1991////////////////////////////////////////////////     
1992xptr_t process_txt_get_owner( uint32_t channel )
[435]1993{
1994    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
1995    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
1996    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
1997
[436]1998    return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]1999}
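/*
 * Illustrative sketch (guarded out of the build): a foreground test built on
 * process_txt_get_owner(). The helper name example_is_foreground is an
 * assumption made only for this sketch.
 */
#if 0
static uint32_t example_is_foreground( xptr_t   process_xp,
                                       uint32_t channel )
{
    // the foreground (owner) process of a terminal receives the keyboard input
    return ( process_txt_get_owner( channel ) == process_xp );
}
#endif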
2000
2001///////////////////////////////////////////
2002void process_txt_display( uint32_t txt_id )
2003{
2004    xptr_t      chdev_xp;
2005    cxy_t       chdev_cxy;
2006    chdev_t   * chdev_ptr;
2007    xptr_t      root_xp;
2008    xptr_t      lock_xp;
2009    xptr_t      current_xp;
2010    xptr_t      iter_xp;
[443]2011    cxy_t       txt0_cxy;
2012    chdev_t   * txt0_ptr;
2013    xptr_t      txt0_xp;
2014    xptr_t      txt0_lock_xp;
2015    reg_t       txt0_save_sr;    // save SR to take TXT0 lock in busy mode
2016   
[435]2017    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2018    __FUNCTION__ , "illegal TXT terminal index" );
2019
[443]2020    // get pointers on TXT0 chdev
2021    txt0_xp  = chdev_dir.txt_tx[0];
2022    txt0_cxy = GET_CXY( txt0_xp );
2023    txt0_ptr = GET_PTR( txt0_xp );
2024
2025    // get extended pointer on TXT0 lock
2026    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2027
[435]2028    // get pointers on TXT_RX[txt_id] chdev
2029    chdev_xp  = chdev_dir.txt_rx[txt_id];
2030    chdev_cxy = GET_CXY( chdev_xp );
2031    chdev_ptr = GET_PTR( chdev_xp );
2032
2033    // get extended pointer on root & lock of attached process list
2034    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2035    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2036
[443]2037    // get lock on attached process list
2038    remote_spinlock_lock( lock_xp );
2039
2040    // get TXT0 lock in busy waiting mode
2041    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
2042
[435]2043    // display header
[443]2044    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2045    txt_id , (uint32_t)hal_get_cycles() );
[435]2046
[436]2047    // scan attached process list
[435]2048    XLIST_FOREACH( root_xp , iter_xp )
2049    {
2050        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2051        process_display( current_xp );
2052    }
2053
[443]2054    // release TXT0 lock in busy waiting mode
2055    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );
2056
2057    // release lock on attached process list
[435]2058    remote_spinlock_unlock( lock_xp );
2059
2060}  // end process_txt_display()
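/*
 * Illustrative sketch (guarded out of the build): dumping the processes
 * attached to every user terminal. TXT0 is skipped because it is reserved for
 * the kernel, as noted in process_txt_alloc() above. The helper name is an
 * assumption made only for this sketch.
 */
#if 0
static void example_display_all_txt( void )
{
    uint32_t txt_id;

    for( txt_id = 1 ; txt_id < LOCAL_CLUSTER->nb_txt_channels ; txt_id++ )
    {
        process_txt_display( txt_id );
    }
}
#endif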