source: trunk/kernel/kern/process.c @ 426

Last change on this file since 426 was 416, checked in by alain, 6 years ago

Improve sys_exec.

File size: 46.9 KB
Line 
1/*
2 * process.c - process related management
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner (2016,2017)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
41#include <list.h>
42#include <string.h>
43#include <scheduler.h>
44#include <remote_spinlock.h>
45#include <dqdt.h>
46#include <cluster.h>
47#include <ppm.h>
48#include <boot_info.h>
49#include <process.h>
50#include <elf.h>
51#include <syscalls.h>
52#include <signal.h>
53
54//////////////////////////////////////////////////////////////////////////////////////////
55// Extern global variables
56//////////////////////////////////////////////////////////////////////////////////////////
57
58extern process_t process_zero;
59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Process initialisation related functions
62//////////////////////////////////////////////////////////////////////////////////////////
63
64///////////////////////////
65process_t * process_alloc()
66{
67        kmem_req_t   req;
68
69    req.type  = KMEM_PROCESS;
70        req.size  = sizeof(process_t);
71        req.flags = AF_KERNEL;
72
73    return (process_t *)kmem_alloc( &req );
74}
75
76////////////////////////////////////////
77void process_free( process_t * process )
78{
79    kmem_req_t  req;
80
81        req.type = KMEM_PROCESS;
82        req.ptr  = process;
83        kmem_free( &req );
84}
85
86/////////////////////////////////////////////
87void process_zero_init( process_t * process )
88{
89    // initialize PID, PPID anf PREF
90    process->pid    = 0;
91    process->ppid   = 0;
92    process->ref_xp = XPTR( local_cxy , process );
93
94    // reset th_tbl[] array as empty
95    uint32_t i;
96    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
97        {
98        process->th_tbl[i] = NULL;
99    }
100    process->th_nr  = 0;
101    spinlock_init( &process->th_lock );
102
103        hal_fence();
104
105process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
106__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
107
108}  // end process_zero_init()
109
/////////////////////////////////////////////////
// Initialise the reference process descriptor <process> in the owner cluster:
// - PID / PPID / ref_xp fields,
// - an empty VMM,
// - the fd_array, with the stdin/stdout/stderr pseudo-files created by vfs_open(),
// - the VFS root and cwd inodes inherited from the <model_xp> process,
// - all open file descriptors copied from the model process,
// - the children / semaphore / mutex / barrier / condvar list roots,
// - the th_tbl[] thread table, reset as empty.
// The new descriptor is finally registered in the local cluster manager
// (pref_tbl[], local_list, copies_list).
// Must be executed in the owner cluster (pref_tbl[lpid] is written locally).
void process_reference_init( process_t * process,
                             pid_t       pid,
                             pid_t       ppid,
                             xptr_t      model_xp )
{
    cxy_t       model_cxy;    // cluster of the model process
    process_t * model_ptr;    // local pointer on the model process
    error_t     error1;       // vfs_open() result for stdin
    error_t     error2;       // vfs_open() result for stdout
    error_t     error3;       // vfs_open() result for stderr
    xptr_t      stdin_xp;     // extended pointer on the stdin pseudo-file
    xptr_t      stdout_xp;    // extended pointer on the stdout pseudo-file
    xptr_t      stderr_xp;    // extended pointer on the stderr pseudo-file
    uint32_t    stdin_id;     // fd_array index allocated for stdin (must be 0)
    uint32_t    stdout_id;    // fd_array index allocated for stdout (must be 1)
    uint32_t    stderr_id;    // fd_array index allocated for stderr (must be 2)
    error_t     error;        // vmm_init() result

process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / ppid = %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , ppid );

    // get model process cluster and local pointer
    model_cxy = GET_CXY( model_xp );
    model_ptr = (process_t *)GET_PTR( model_xp );

    // initialize PID, PPID, and REF
    process->pid    = pid;
    process->ppid   = ppid;
    process->ref_xp = XPTR( local_cxy , process );

    // initialize vmm as empty
    error = vmm_init( process );
    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );

process_dmsg("\n[DBG] %s : core[%x,%d] / vmm empty for process %x\n", 
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

    // initialize fd_array as empty
    process_fd_init( process );

    // create stdin / stdout / stderr pseudo-files
    // ppid == 0 identifies the "init" process, which uses
    // dedicated CONFIG_INIT_* pathnames instead of CONFIG_USER_*
    if( ppid == 0 )                                       // process_init
    {
        error1 = vfs_open( process,
                           CONFIG_INIT_STDIN,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

        error2 = vfs_open( process,
                           CONFIG_INIT_STDOUT,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        error3 = vfs_open( process,
                           CONFIG_INIT_STDERR,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );
    }
    else                                                  // any other process
    {
        error1 = vfs_open( process,
                           CONFIG_USER_STDIN,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

        error2 = vfs_open( process,
                           CONFIG_USER_STDOUT,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        error3 = vfs_open( process,
                           CONFIG_USER_STDERR,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );
    }

    assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ ,
    "cannot open stdin/stdout/stderr pseudo files\n");

    // the fd_array was empty, so vfs_open() must have allocated slots 0/1/2
    assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ ,
    "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id );

    // inherit the VFS root and cwd inodes from the model process,
    // and take a reference on both, since this process now uses them
    process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_root_xp ) );
    process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_cwd_xp ) );
    vfs_inode_remote_up( process->vfs_root_xp );
    vfs_inode_remote_up( process->vfs_cwd_xp );

    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );

    // copy all open file descriptors (other than stdin / stdout / stderr)
    process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                            XPTR( model_cxy , &model_ptr->fd_array ) );

process_dmsg("\n[DBG] %s : core[%x,%d] / fd array for process %x\n", 
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

    // reset children list root
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr     = 0;

    // reset semaphore / mutex / barrier / condvar list roots
    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );

    // register new process in the local cluster manager pref_tbl[]
    lpid_t lpid = LPID_FROM_PID( pid );
    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( process );

    // register new process descriptor in local cluster manager copies_list
    cluster_process_copies_link( process );

    // reset th_tbl[] array as empty in process descriptor
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
    {
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    spinlock_init( &process->th_lock );

    hal_fence();

process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

}  // process_reference init()
259
260/////////////////////////////////////////////////////
261error_t process_copy_init( process_t * local_process,
262                           xptr_t      reference_process_xp )
263{
264    error_t error;
265
266    // get reference process cluster and local pointer
267    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
268    process_t * ref_ptr = (process_t *)GET_PTR( reference_process_xp );
269
270    // set the pid, ppid, ref_xp fields in local process
271    local_process->pid    = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) );
272    local_process->ppid   = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->ppid ) );
273    local_process->ref_xp = reference_process_xp;
274
275process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x in cluster %x\n",
276__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
277
278    // reset local process vmm
279    error = vmm_init( local_process );
280    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n");
281
282    // reset process file descriptors array
283        process_fd_init( local_process );
284
285    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
286    local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
287    local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
288    local_process->vfs_cwd_xp  = XPTR_NULL;
289
290    // reset children list root (not used in a process descriptor copy)
291    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
292    local_process->children_nr   = 0;
293
294    // reset brothers list (not used in a process descriptor copy)
295    xlist_entry_init( XPTR( local_cxy , &local_process->brothers_list ) );
296
297    // reset semaphores list root (not used in a process descriptor copy)
298    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
299    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
300    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
301    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
302
303    // reset th_tbl[] array as empty
304    uint32_t i;
305    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
306        {
307        local_process->th_tbl[i] = NULL;
308    }
309    local_process->th_nr  = 0;
310    spinlock_init( &local_process->th_lock );
311
312    // register new process descriptor in local cluster manager local_list
313    cluster_process_local_link( local_process );
314
315    // register new process descriptor in owner cluster manager copies_list
316    cluster_process_copies_link( local_process );
317
318        hal_fence();
319
320process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x in cluster %x\n",
321__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
322
323    return 0;
324
325} // end process_copy_init()
326
///////////////////////////////////////////
// Release all resources attached to the local <process> descriptor:
// unlink it from the cluster manager lists, release its PID, drop the
// references on its bin / root / cwd files, destroy its VMM, and finally
// free the descriptor memory. The process must have no registered thread.
void process_destroy( process_t * process )
{
    // a process that still has registered threads cannot be destroyed
    if( process->th_nr != 0 )
    {
        panic("process %x in cluster %x has still active threads",
              process->pid , local_cxy );
    }

    // get local process manager pointer
    pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr;

    // remove the process descriptor from local_list in cluster manager
    remote_spinlock_lock( XPTR( local_cxy , &pmgr->local_lock ) );
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    remote_spinlock_unlock( XPTR( local_cxy , &pmgr->local_lock ) );

    // get extended pointer on copies_lock in owner cluster manager
    // NOTE(review): the XPTR combines the OWNER cluster identifier with the
    // LOCAL pmgr pointer — presumably valid because the cluster manager is
    // located at the same virtual address in every cluster; confirm.
    cxy_t  owner_cxy    = CXY_FROM_PID( process->pid );
    lpid_t lpid         = LPID_FROM_PID( process->pid );
    xptr_t copies_lock  = XPTR( owner_cxy , &pmgr->copies_lock[lpid] );

    // remove the local process descriptor from copies_list
    remote_spinlock_lock( copies_lock );
    xlist_unlink( XPTR( local_cxy , &process->copies_list ) );
    remote_spinlock_unlock( copies_lock );

    // release the process PID to cluster manager
    cluster_pid_release( process->pid );

    hal_fence();

    // From this point, the process descriptor is unreachable

    // FIXME close all open files and update dirty [AG]

    // Decrease refcount for bin file, root file and cwd file
    if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
    if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
    if( process->vfs_cwd_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_cwd_xp );

    // Destroy VMM
    vmm_destroy( process );

    // release memory allocated to process descriptor
    process_free( process );

}  // end process_destroy()
375
376/////////////////////////////////////////////////
377char * process_action_str( uint32_t action_type )
378{
379    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
380    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
381    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
382    else                                          return "undefined";
383}
384
////////////////////////////////////////////
// Apply <action_type> (block / unblock / delete all threads) to every copy
// of <process>, in all clusters: a non-blocking RPC is sent to each remote
// cluster holding a copy, then the action is applied directly in the local
// (owner) cluster. The calling thread blocks and deschedules until all RPC
// responses have been received. Must be executed in the owner cluster.
void process_sigaction( process_t * process,
                        uint32_t    action_type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    uint32_t           responses;         // number of remote process copies
    uint32_t           rsp_count;         // used to assert number of copies

    // the rpc descriptor lives on the stack: it stays valid while this
    // thread is blocked, and is shared by all the RPCs sent below
    rpc_desc_t         rpc;

sigaction_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x\n",
__FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );

    thread_t         * client = CURRENT_THREAD;
    xptr_t             client_xp = XPTR( local_cxy , client );

    // get local pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( process->pid );
    lpid      = LPID_FROM_PID( process->pid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
    "must be executed in the owner cluster\n" ); 
   
    // get number of remote copies (all copies but the local one)
    responses = cluster->pmgr.copies_nr[lpid] - 1;
    rsp_count = 0;

    // check action type
    assert( ((action_type == DELETE_ALL_THREADS ) ||
             (action_type == BLOCK_ALL_THREADS )  ||
             (action_type == UNBLOCK_ALL_THREADS )),
             __FUNCTION__ , "illegal action type" );
             
    // initialise rpc descriptor: the response counter is pre-loaded with
    // the number of expected answers, and decremented by the RPC servers
    rpc.index    = RPC_PROCESS_SIGACTION;
    rpc.response = responses;
    rpc.blocking = false;
    rpc.thread   = client;

    // get extended pointers on copies root and copies lock
    root_xp   = XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the copies
    remote_spinlock_lock( lock_xp );

    // send one non-blocking RPC per remote copy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = (process_t *)GET_PTR( process_xp );

        // send RPC to remote clusters
        if( process_cxy != local_cxy ) 
        {

sigaction_dmsg("\n[DBG] %s : send RPC to remote cluster %x\n",
__FUNCTION__ , process_cxy );

            rpc.args[0] = (uint64_t)action_type;
            rpc.args[1] = (uint64_t)(intptr_t)process_ptr;
            rpc_process_sigaction_client( process_cxy , &rpc );
            rsp_count++;
        }
    }
   
    // release the lock protecting process copies
    remote_spinlock_unlock( lock_xp );

    // the number of RPCs sent must match the expected number of responses
    assert( (rsp_count == responses) , __FUNCTION__ ,
    "unconsistent number of process copies : rsp_count = %d / responses = %d",
    rsp_count , responses );

    // block and deschedule to wait RPC responses if required
    if( responses )
    {   
        thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
        sched_yield("BLOCKED on RPC_PROCESS_SIGACTION");
    }

sigaction_dmsg("\n[DBG] %s : make action in owner cluster %x\n",
__FUNCTION__ , local_cxy );


    // call directly the relevant function in local owner cluster
    if      (action_type == DELETE_ALL_THREADS  ) process_delete ( process , client_xp ); 
    else if (action_type == BLOCK_ALL_THREADS   ) process_block  ( process , client_xp ); 
    else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock( process             );

sigaction_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x\n",
__FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );

}  // end process_sigaction()
492
////////////////////////////////////////
// Block all threads of <process> registered in the local cluster, except the
// client thread identified by <client_xp>. For threads running on another
// core than the caller's, the target scheduler is asked (via an IPI and the
// REQ_ACK mechanism) to acknowledge the blocking, to guarantee the target
// thread is actually descheduled. The function busy-waits until all
// acknowledgements have been received.
void process_block( process_t * process,
                    xptr_t      client_xp )
{
    thread_t          * target;         // pointer on target thread
    uint32_t            ltid;           // index in process th_tbl
    thread_t          * requester;      // requesting thread pointer
    uint32_t            count;          // requests counter
    volatile uint32_t   rsp_count;      // responses counter, decremented by
                                        // this thread and remote schedulers

    // get calling thread pointer
    requester = CURRENT_THREAD;

sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
__FUNCTION__ , process->pid , local_cxy );

    // get lock protecting process th_tbl[]
    spinlock_lock( &process->th_lock );

    // initialize local responses counter
    rsp_count = process->th_nr;

    // loop on process threads to block and deschedule all threads in cluster
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread found
        {
            count++;

            // - if the target thread is the client thread, we do nothing,
            //   and simply decrement the responses counter.
            // - if the calling thread and the target thread are on the same core,
            //   we block the target thread, we don't ask ask anything to the scheduler,
            //   and simply decrement the responses counter.
            // - if the calling thread and the target thread are not running on the same
            //   core, we ask the target scheduler to acknowlege the blocking
            //   to be sure that the target thread is not running.
           
            if( XPTR( local_cxy , target ) == client_xp )
            {
                // decrement responses counter
                hal_atomic_add( (void *)&rsp_count , -1 );
            }
            else if( requester->core->lid == target->core->lid )
            {
                // set the global blocked bit in target thread descriptor.
                thread_block( target , THREAD_BLOCKED_GLOBAL );

                // decrement responses counter
                hal_atomic_add( (void *)&rsp_count , -1 );
            }
            else
            {
                // set the global blocked bit in target thread descriptor.
                thread_block( target , THREAD_BLOCKED_GLOBAL );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (void *)&rsp_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    spinlock_unlock( &process->th_lock );

    // busy-wait until all remote schedulers have acknowledged the blocking
    while( 1 )
    {
        // exit loop when all local responses received
        if ( rsp_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
__FUNCTION__ , process->pid , local_cxy , count );

}  // end process_block()
578
579///////////////////////////////////////////
580void process_unblock( process_t * process )
581{
582    thread_t          * target;        // pointer on target thead
583    uint32_t            ltid;          // index in process th_tbl
584    uint32_t            count;         // requests counter
585
586sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
587__FUNCTION__ , process->pid , local_cxy );
588
589    // get lock protecting process th_tbl[]
590    spinlock_lock( &process->th_lock );
591
592    // loop on process threads to unblock all threads in cluster
593    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
594    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
595    {
596        target = process->th_tbl[ltid];
597
598        if( target != NULL )             // thread found
599        {
600            count++;
601
602            // reset the global blocked bit in target thread descriptor.
603            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
604        }
605    }
606
607    // get lock protecting process th_tbl[]
608    spinlock_unlock( &process->th_lock );
609
610sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
611__FUNCTION__ , process->pid , local_cxy , count );
612
613}  // end process_unblock()
614
615/////////////////////////////////////////
616void process_delete( process_t * process,
617                     xptr_t      client_xp )
618{
619    thread_t          * target;        // pointer on target thread
620    uint32_t            ltid;          // index in process th_tbl
621    uint32_t            count;         // request counter
622    thread_t          * requester;     // pointer on calling thread
623
624sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n",
625__FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
626
627    // get calling thread pointer
628    requester = CURRENT_THREAD;
629
630    // get lock protecting process th_tbl[]
631    spinlock_lock( &process->th_lock );
632
633    // loop on threads to set the REQ_DELETE flag
634    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
635    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
636    {
637        target = process->th_tbl[ltid];
638
639        if( target != NULL )             // thread found
640        {
641            count++;
642
643            // delete only if the target is not the client
644            if( XPTR( local_cxy , target ) != client_xp ) 
645            { 
646                hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
647            }
648        }
649    }
650
651    // get lock protecting process th_tbl[]
652    spinlock_unlock( &process->th_lock );
653
654sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n",
655__FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
656
657}  // end process_delete()
658
659///////////////////////////////////////////////
660process_t * process_get_local_copy( pid_t pid )
661{
662    error_t        error;
663    process_t    * process_ptr;   // local pointer on process
664    xptr_t         process_xp;    // extended pointer on process
665
666    cluster_t * cluster = LOCAL_CLUSTER;
667
668    // get lock protecting local list of processes
669    remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
670
671    // scan the local list of process descriptors to find the process
672    xptr_t  iter;
673    bool_t  found = false;
674    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
675    {
676        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
677        process_ptr = (process_t *)GET_PTR( process_xp );
678        if( process_ptr->pid == pid )
679        {
680            found = true;
681            break;
682        }
683    }
684
685    // release lock protecting local list of processes
686    remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
687
688    // allocate memory for a new local process descriptor
689    // and initialise it from reference cluster if required
690    if( !found )
691    {
692        // get extended pointer on reference process descriptor
693        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
694
695        assert( (ref_xp != XPTR_NULL) , __FUNCTION__ , "illegal pid\n" );
696
697        // allocate memory for local process descriptor
698        process_ptr = process_alloc();
699        if( process_ptr == NULL )  return NULL;
700
701        // initialize local process descriptor copy
702        error = process_copy_init( process_ptr , ref_xp );
703        if( error ) return NULL;
704    }
705
706    return process_ptr;
707
708}  // end process_get_local_copy()
709
710//////////////////////////////////////////////////////////////////////////////////////////
711// File descriptor array related functions
712//////////////////////////////////////////////////////////////////////////////////////////
713
714///////////////////////////////////////////
715void process_fd_init( process_t * process )
716{
717    uint32_t fd;
718
719    remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );
720
721    process->fd_array.current = 0;
722
723    // initialize array
724    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
725    {
726        process->fd_array.array[fd] = XPTR_NULL;
727    }
728}
729
730//////////////////////////////
731bool_t process_fd_array_full()
732{
733    // get extended pointer on reference process
734    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
735
736    // get reference process cluster and local pointer
737    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
738    cxy_t       ref_cxy = GET_CXY( ref_xp );
739
740    // get number of open file descriptors from reference fd_array
741    uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
742
743        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
744}
745
746/////////////////////////////////////////////////
747error_t process_fd_register( process_t * process,
748                             xptr_t      file_xp,
749                             uint32_t  * fdid )
750{
751    bool_t    found;
752    uint32_t  id;
753    xptr_t    xp;
754
755    // get reference process cluster and local pointer
756    xptr_t ref_xp = process->ref_xp;
757    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
758    cxy_t       ref_cxy = GET_CXY( ref_xp );
759
760    // take lock protecting reference fd_array
761        remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
762
763    found   = false;
764
765    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
766    {
767        xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
768        if ( xp == XPTR_NULL )
769        {
770            found = true;
771            hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
772                hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
773                        *fdid = id;
774            break;
775        }
776    }
777
778    // release lock protecting reference fd_array
779        remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
780
781    if ( !found ) return EMFILE;
782    else          return 0;
783}
784
785////////////////////////////////////////////////
786xptr_t process_fd_get_xptr( process_t * process,
787                            uint32_t    fdid )
788{
789    xptr_t  file_xp;
790
791    // access local copy of process descriptor
792    file_xp = process->fd_array.array[fdid];
793
794    if( file_xp == XPTR_NULL )
795    {
796        // get reference process cluster and local pointer
797        xptr_t      ref_xp  = process->ref_xp;
798        cxy_t       ref_cxy = GET_CXY( ref_xp );
799        process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
800
801        // access reference process descriptor
802        file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
803
804        // update local fd_array if found
805        if( file_xp != XPTR_NULL )
806        {
807            process->fd_array.array[fdid] = file_xp;
808        }
809    }
810
811    return file_xp;
812
813}  // end process_fd_get_xptr()
814
815///////////////////////////////////////////
816void process_fd_remote_copy( xptr_t dst_xp,
817                             xptr_t src_xp )
818{
819    uint32_t fd;
820    xptr_t   entry;
821
822    // get cluster and local pointer for src fd_array
823    cxy_t        src_cxy = GET_CXY( src_xp );
824    fd_array_t * src_ptr = (fd_array_t *)GET_PTR( src_xp );
825
826    // get cluster and local pointer for dst fd_array
827    cxy_t        dst_cxy = GET_CXY( dst_xp );
828    fd_array_t * dst_ptr = (fd_array_t *)GET_PTR( dst_xp );
829
830    // get the remote lock protecting the src fd_array
831        remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
832
833    // loop on all entries other than
834    // the three first entries: stdin/stdout/stderr
835    for( fd = 3 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
836        {
837                entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
838
839                if( entry != XPTR_NULL )
840                {
841            // increment file descriptor ref count
842            vfs_file_count_up( entry );
843
844                        // copy entry in destination process fd_array
845                        hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
846                }
847        }
848
849    // release lock on source process fd_array
850        remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
851
852}  // end process_fd_remote_copy()
853
854////////////////////////////////////////////////////////////////////////////////////
855//  Thread related functions
856////////////////////////////////////////////////////////////////////////////////////
857
858/////////////////////////////////////////////////////
859error_t process_register_thread( process_t * process,
860                                 thread_t  * thread,
861                                 trdid_t   * trdid )
862{
863    ltid_t   ltid;
864    bool_t   found;
865
866    assert( (process != NULL) , __FUNCTION__ , "process argument is NULL" );
867
868    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
869
870    // search a free slot in th_tbl[]
871    // 0 is not a valid ltid value
872    found = false;
873    for( ltid = 1 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
874    {
875        if( process->th_tbl[ltid] == NULL )
876        {
877            found = true;
878            break;
879        }
880    }
881
882    if( found )
883    {
884        // register thread in th_tbl[]
885        process->th_tbl[ltid] = thread;
886        process->th_nr++;
887
888        // returns trdid
889        *trdid = TRDID( local_cxy , ltid );
890    }
891
892    return (found) ? 0 : ENOMEM;
893
894}  // end process_register_thread()
895
896///////////////////////////////////////////////
897void process_remove_thread( thread_t * thread )
898{
899    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
900
901    process_t * process = thread->process;
902
903    // get thread local index
904    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
905
906    // remove thread from th_tbl[]
907    process->th_tbl[ltid] = NULL;
908    process->th_nr--;
909
910}  // process_remove_thread()
911
912/////////////////////////////////////////////////////////
913error_t process_make_fork( xptr_t      parent_process_xp,
914                           xptr_t      parent_thread_xp,
915                           pid_t     * child_pid,
916                           thread_t ** child_thread )
917{
918    process_t * process;         // local pointer on child process descriptor
919    thread_t  * thread;          // local pointer on child thread descriptor
920    pid_t       new_pid;         // process identifier for child process
921    pid_t       parent_pid;      // process identifier for parent process
922    xptr_t      ref_xp;          // extended pointer on reference process
923    error_t     error;
924
925    // get cluster and local pointer for parent process
926    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
927    process_t * parent_process_ptr = (process_t *)GET_PTR( parent_process_xp );
928
929    // get parent process PID
930    parent_pid = hal_remote_lw( XPTR( parent_process_cxy , &parent_process_ptr->pid ) );
931   
932    // check parent process is the reference
933    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
934    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
935    "parent process must be the reference process\n" );
936
937fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
938__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
939
940    // allocate a process descriptor
941    process = process_alloc();
942    if( process == NULL )
943    {
944        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
945        __FUNCTION__, local_cxy ); 
946        return -1;
947    }
948
949fork_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
950 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
951
952    // allocate a child PID from local cluster
953    error = cluster_pid_alloc( process , &new_pid );
954    if( (error != 0) || (new_pid == 0) )
955    {
956        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
957        __FUNCTION__, local_cxy ); 
958        process_free( process );
959        return -1;
960    }
961
962fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
963 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );
964
965    // initializes child process descriptor from parent process descriptor
966    process_reference_init( process,
967                            new_pid,
968                            parent_pid,
969                            parent_process_xp );
970
971fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
972__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
973
974    // copy VMM from parent descriptor to child descriptor
975    error = vmm_fork_copy( process,
976                           parent_process_xp );
977    if( error )
978    {
979        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
980        __FUNCTION__, local_cxy ); 
981        process_free( process );
982        cluster_pid_release( new_pid );
983        return -1;
984    }
985
986fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
987__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
988
989    // create child thread descriptor from parent thread descriptor
990    error = thread_user_fork( parent_thread_xp,
991                              process,
992                              &thread );
993    if( error )
994    {
995        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
996        __FUNCTION__, local_cxy ); 
997        process_free( process );
998        cluster_pid_release( new_pid );
999        return -1;
1000    }
1001
1002fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 
1003__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
1004
1005    // update parent process GPT to set Copy_On_Write for shared data vsegs
1006    // this includes all replicated GPT copies
1007    if( parent_process_cxy == local_cxy )   // reference is local
1008    {
1009        vmm_set_cow( parent_process_ptr );
1010    }
1011    else                                    // reference is remote
1012    {
1013        rpc_vmm_set_cow_client( parent_process_cxy,
1014                                parent_process_ptr );
1015    }
1016
1017fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
1018__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
1019
1020    // update children list in parent process
1021        xlist_add_last( XPTR( parent_process_cxy , &parent_process_ptr->children_root ),
1022                    XPTR( local_cxy , &process->brothers_list ) );
1023        hal_remote_atomic_add( XPTR( parent_process_cxy,
1024                                 &parent_process_ptr->children_nr), 1 );
1025
1026// vmm_display( process , true );
1027// vmm_display( parent_process_ptr , true );
1028// sched_display( 0 );
1029
1030    // return success
1031    *child_thread = thread;
1032    *child_pid    = new_pid;
1033
1034fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
1035__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
1036
1037    return 0;
1038
1039}   // end process_make_fork()
1040
1041
1042/////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Implement the exec() system call : build a brand new process (with a
// temporary PID) from the .elf file described in <exec_info>, transfer
// the old PID to the new process, and destroy all threads of the old
// process, including the calling thread, which commits suicide at the
// end of this function. Must run in the owner cluster of the old PID.
// Returns -1 on failure / does not return on success.
////////////////////////////////////////////////////////////////////////
error_t process_make_exec( exec_info_t  * exec_info )
{
    char           * path;                    // pathname to .elf file
    process_t      * old_process;             // local pointer on old process
    process_t      * new_process;             // local pointer on new process
    pid_t            old_pid;                 // old process identifier
    pid_t            new_pid;                 // new (temporary) process identifier
    thread_t       * old_thread;              // pointer on calling (old) thread
    thread_t       * new_thread;              // pointer on new main thread
    pthread_attr_t   attr;                    // main thread attributes
    lid_t            lid;                     // selected core local index
        error_t          error;

        // get .elf pathname and PID from exec_info
        path     = exec_info->path;
    old_pid  = exec_info->pid;

    // this function must be executed by a thread running in owner cluster
    assert( (CXY_FROM_PID( old_pid ) == local_cxy), __FUNCTION__,
    "local cluster %x is not owner for process %x\n", local_cxy, old_pid );

exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, old_pid , path );

    // get old process and thread local pointers
    old_process = (process_t *)cluster_get_local_process_from_pid( old_pid );
    old_thread  = CURRENT_THREAD;
   
    if( old_process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get old process descriptor\n", __FUNCTION__ );
        return -1;
    }

    // allocate memory for new process descriptor
    new_process = process_alloc();

    if( new_process == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate new process descriptor\n", __FUNCTION__ );
        return -1;
    }

    // get a (temporary) PID for new process : the real (old) PID is
    // transferred to the new process only after the .elf is loaded
    // and the main thread created, so failures leave the old process intact
    error = cluster_pid_alloc( new_process , &new_pid );

    if( error )
    {
        // NOTE(review): this error path uses process_destroy() while the
        // equivalent paths in process_make_fork() use process_free() —
        // confirm which cleanup is correct for a not-yet-initialized process
        printk("\n[ERROR] in %s : cannot allocate a temporary PID\n", __FUNCTION__ );
        process_destroy( new_process );
        return -1;
    }

    // initialize new process descriptor : the new process keeps the same
    // parent as the old process, and uses the old process as reference
    // for inherited state (e.g. fd_array)
    process_reference_init( new_process,
                            new_pid,                            // temporary PID
                            old_process->ppid,                  // same parent
                            XPTR( local_cxy , old_process ) );

exec_dmsg("\n[DBG] %s : core[%x,%d] created new process %x / path = %s\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_pid, path );

    // register "code" and "data" vsegs as well as entry-point
    // in new process VMM, using information contained in the elf file.
        if( elf_load_process( path , new_process ) )
        {
                printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n",
                __FUNCTION__, new_pid , path );
        cluster_pid_release( new_pid );
        process_destroy( new_process );
        return -1;
        }

exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );

    // select a core in local cluster to execute the main thread
    lid  = cluster_select_local_core();

    // initialize pthread attributes for main thread :
    // detached, pinned on the selected local core
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor for the new main thread,
    // starting at the .elf entry point, with the user-supplied arguments
        error = thread_user_create( new_pid,
                                (void *)new_process->vmm.entry_point,
                                exec_info->args_pointers,
                                &attr,
                                &new_thread );
        if( error )
        {
                printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n",
            __FUNCTION__, new_pid , path );
        cluster_pid_release( new_pid );
        process_destroy( new_process );
        return -1;
        }

exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x\n",
__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_thread->trdid );

    // the new process takes the old process slot
    // in the children list rooted in the parent process
        xlist_replace( XPTR( local_cxy , &old_process->brothers_list ) ,
                   XPTR( local_cxy , &new_process->brothers_list ) );

    // request blocking for all threads in old process (but the calling thread)
    process_sigaction( old_process , BLOCK_ALL_THREADS );

    // request destruction for all threads in old process (but the calling thread)
    process_sigaction( old_process , DELETE_ALL_THREADS );

    // swap PIDs : the new process takes over the old PID, and the old
    // process gets an invalid PID so it can no longer be found by lookup
    new_process->pid = old_pid;
    old_process->pid = 0xFFFFFFFF;

    // release temporary PID
    cluster_pid_release( new_pid );
   
    // make the new main thread runnable
        thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );

exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path  );

    // calling thread commits suicide :
    // set BLOCKED_GLOBAL bit
    thread_block( old_thread , THREAD_BLOCKED_GLOBAL );

    // set REQ_DELETE flag
    hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );

    // deschedule : the scheduler will destroy this thread
    sched_yield("suicide after exec"); 

    // never executed but required by compiler
        return 0;

}  // end process_make_exec()
1181
1182///////////////////////////////////////
1183void process_make_kill( pid_t      pid,
1184                        uint32_t   sig_id )
1185{
1186    // this function must be executed by a thread running in owner cluster
1187    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
1188    "must execute in owner cluster" );
1189
1190kill_dmsg("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d\n",
1191__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , sig_id );
1192
1193    // get pointer on local process descriptor
1194    process_t * process = process_get_local_copy( pid );
1195
1196    // does nothing if process does not exist
1197    if( process == NULL )
1198    {
1199        printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
1200        __FUNCTION__ , pid );
1201        return;
1202    }
1203
1204    // analyse signal type
1205    switch( sig_id )
1206    {
1207        case SIGSTOP:     // block all threads in all clusters
1208        {
1209            process_sigaction( process , BLOCK_ALL_THREADS );
1210        }
1211        break;
1212        case SIGCONT:     // unblock all threads in all clusters
1213        {
1214            process_sigaction( process , UNBLOCK_ALL_THREADS );
1215        }
1216        break;
1217        case SIGKILL:  // block all threads, then delete all threads
1218        {
1219            // block all threads (but the calling thread)
1220            process_sigaction( process , BLOCK_ALL_THREADS );
1221
1222            // delete all threads (but the calling thread)
1223            process_sigaction( process , DELETE_ALL_THREADS );
1224
1225            // delete the calling thread if required
1226            thread_t * this = CURRENT_THREAD;
1227
1228            if( this->process == process )
1229            {
1230                // set REQ_DELETE flag
1231                hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );
1232
1233                // deschedule
1234                sched_yield( "suicide after kill" ); 
1235            }
1236        }
1237        break;
1238    }
1239
1240//@@@
1241sched_display( 0 );
1242//@@@
1243
1244kill_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d \n",
1245__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , sig_id );
1246
1247}  // end process_make_kill()
1248
1249/////////////////////////////////////////
1250void process_make_exit( pid_t       pid,
1251                        uint32_t    status )
1252{
1253    // this function must be executed by a thread running in owner cluster
1254    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
1255    "must execute in owner cluster" );
1256
1257    // get pointer on local process descriptor
1258    process_t * process = process_get_local_copy( pid );
1259
1260    // does nothing if process does not exist
1261    if( process == NULL )
1262    {
1263        printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
1264        __FUNCTION__ , pid );
1265        return;
1266    }
1267
1268    // block all threads in all clusters (but the calling thread)
1269    process_sigaction( process , BLOCK_ALL_THREADS );
1270
1271    // delete all threads in all clusters (but the calling thread)
1272    process_sigaction( process , DELETE_ALL_THREADS );
1273
1274    // delete the calling thread
1275    hal_atomic_or( &CURRENT_THREAD->flags , THREAD_FLAG_REQ_DELETE );
1276
1277    // deschedule
1278    sched_yield( "suicide after exit" ); 
1279
1280}  // end process_make_exit()
1281
1282//////////////////////////
////////////////////////////////////////////////////////////////////////
// Create the "init" user process (LPID 1, child of process_zero) in the
// local cluster : allocate and initialize the process descriptor, load
// the .elf defined by CONFIG_PROCESS_INIT_PATH, create the main thread
// on a locally selected core, and make it runnable.
////////////////////////////////////////////////////////////////////////
void process_init_create()
{
    process_t      * process;       // local pointer on process_init descriptor
    pid_t            pid;           // process_init identifier
    thread_t       * thread;        // local pointer on main thread
    pthread_attr_t   attr;          // main thread attributes
    lid_t            lid;           // selected core local index for main thread
    error_t          error;

kinit_dmsg("\n[DBG] %s :  core[%x,%d] enters\n", 
__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );

    // allocates memory for process descriptor from local cluster
        process = process_alloc(); 
        if( process == NULL )
    {
        // NOTE(review): printk appears to return (the [ERROR] paths in
        // process_make_fork follow it with a return) — if so, execution
        // falls through here with process == NULL; confirm a halting
        // panic primitive should be used instead
                printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
                __FUNCTION__, local_cxy  );
    }

    // get PID from local cluster
    error = cluster_pid_alloc( process , &pid );
    if( error )
    {
        // NOTE(review): same fall-through concern as above — process is
        // destroyed here but still used below; confirm intended behavior
                printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
                __FUNCTION__, local_cxy );
        process_destroy( process );
    }

    // init must be the very first process created in this cluster
    assert( (LPID_FROM_PID(pid) == 1) , __FUNCTION__ , "LPID must be 1 for process_init" );

    // initialize process descriptor / parent is local process_zero
    process_reference_init( process,
                            pid,
                            0,
                            XPTR( local_cxy , &process_zero ) );

kinit_dmsg("\n[DBG] %s : core[%x,%d] / process initialised\n", 
__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );

    // register "code" and "data" vsegs as well as entry-point
    // in process VMM, using information contained in the elf file.
        if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
        {
        // NOTE(review): process is used after process_destroy() if
        // execution continues past this printk — confirm panic semantics
                printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
        process_destroy( process );
        }

kinit_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );

    // select a core in local cluster to execute the main thread
    lid  = cluster_select_local_core();

    // initialize pthread attributes for main thread :
    // detached, pinned on the selected local core
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor, starting at the .elf
    // entry point, with no user arguments
        error = thread_user_create( pid,
                                (void *)process->vmm.entry_point,
                                NULL,
                                &attr,
                                &thread );
        if( error )
        {
        // NOTE(review): same use-after-destroy / fall-through concern
                printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
        process_destroy( process );
        }

    // make the main thread runnable
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    hal_fence();

kinit_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread );

}  // end process_init_create()
1365
Note: See TracBrowser for help on using the repository browser.