Changeset 433


Ignore:
Timestamp:
Feb 14, 2018, 3:40:19 PM (4 years ago)
Author:
alain
Message:

blip

Location:
trunk/kernel
Files:
38 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/devices/dev_txt.c

    r422 r433  
    117117    thread_t * this = CURRENT_THREAD;
    118118
    119 txt_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enters / cycle %d\n",
    120 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type), hal_time_stamp() );
     119#if CONFIG_DEBUG_DEV_TXT
     120uint32_t cycle = (uint32_t)hal_get_cycles();
     121if( CONFIG_DEBUG_DEV_TXT < cycle )
     122printk("\n[DBG] %s : thread %x enters / cycle %d\n",
     123__FUNCTION__, CURRENT_THREAD , cycle );
     124#endif
    121125
    122126    // check channel argument
     
    140144    chdev_register_command( dev_xp );
    141145
    142 txt_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) exit / cycle %d\n",
    143 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type), hal_time_stamp() );
     146#if CONFIG_DEBUG_DEV_TXT
     147cycle = (uint32_t)hal_get_cycles();
     148if( CONFIG_DEBUG_DEV_TXT < cycle )
     149printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     150__FUNCTION__, CURRENT_THREAD , cycle );
     151#endif
    144152
    145153    // return I/O operation status from calling thread descriptor
  • trunk/kernel/fs/devfs.c

    r430 r433  
    2828#include <printk.h>
    2929#include <chdev.h>
     30#include <thread.h>
    3031#include <dev_txt.h>
    3132#include <cluster.h>
     
    8687    error_t  error;
    8788
    88     devfs_dmsg("\n[DBG] %s : enter in cluster %x\n",
    89                __FUNCTION__ , local_cxy );
     89#if CONFIG_DEBUG_DEVFS_INIT
     90uint32_t cycle = (uint32_t)hal_get_cycles();
     91if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     92printk("\n[DBG] %s : thread %x enter at cycle %d\n",
     93__FUNCTION__ , CURRENT_THREAD , cycle );
     94#endif
    9095
    9196    // creates DEVFS "dev" inode in cluster IO
     
    100105    assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
    101106
    102     devfs_dmsg("\n[DBG] %s : <dev> created in cluster %x\n",
    103                __FUNCTION__ , local_cxy );
    104 
    105107    // create DEVFS "external" inode in cluster IO
    106108    error = vfs_add_child_in_parent( LOCAL_CLUSTER->io_cxy,
     
    114116    assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
    115117
    116     devfs_dmsg("\n[DBG] %s : <external> created in cluster %x\n",
    117                __FUNCTION__ , local_cxy );
     118#if CONFIG_DEBUG_DEVFS_INIT
     119cycle = (uint32_t)hal_get_cycles();
     120if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     121printk("\n[DBG] %s : thread %x exit at cycle %d\n",
     122__FUNCTION__ , CURRENT_THREAD , cycle );
     123#endif
     124
    118125}
    119126
     
    129136    xptr_t        inode_xp;
    130137    uint32_t      channel;
     138
     139#if CONFIG_DEBUG_DEVFS_INIT
     140uint32_t cycle = (uint32_t)hal_get_cycles();
     141if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     142printk("\n[DBG] %s : thread %x enter at cycle %d\n",
     143__FUNCTION__ , CURRENT_THREAD , cycle );
     144#endif
    131145
    132146    // create "internal" directory linked to "dev"
     
    140154                             devfs_internal_inode_xp );
    141155
    142 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    143 __FUNCTION__ , node_name , local_cxy );
    144 
    145156    // create MMC chdev inode
    146157    chdev_xp  = chdev_dir.mmc[local_cxy];
     
    155166                                 GET_PTR( chdev_xp ),
    156167                                 &inode_xp );
    157 
    158 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    159 __FUNCTION__ , chdev_ptr->name , local_cxy );
    160 
    161168    }
    162169
     
    175182                                     GET_PTR( chdev_xp ),
    176183                                     &inode_xp );
    177 
    178 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    179 __FUNCTION__ , chdev_ptr->name , local_cxy );
    180 
    181184        }
    182185    }
     
    197200                                     GET_PTR( chdev_xp ),
    198201                                     &inode_xp );
    199 
    200 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    201 __FUNCTION__ , chdev_ptr->name , local_cxy );
    202 
    203202        }
    204203    }
     
    219218                                     GET_PTR( chdev_xp ),
    220219                                     &inode_xp );
    221 
    222 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    223 __FUNCTION__ , chdev_ptr->name , local_cxy );
    224 
    225220        }
    226221    }
     
    243238                                         GET_PTR( chdev_xp ),
    244239                                         &inode_xp );
    245 
    246 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    247 __FUNCTION__ , chdev_ptr->name , local_cxy );
    248 
    249240            }
    250241        }
     
    268259                                         GET_PTR( chdev_xp ),
    269260                                         &inode_xp );
    270 
    271 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    272 __FUNCTION__ , chdev_ptr->name , local_cxy );
    273 
    274261            }
    275262        }
     
    293280                                         GET_PTR( chdev_xp ),
    294281                                         &inode_xp );
    295 
    296 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    297 __FUNCTION__ , chdev_ptr->name , local_cxy );
    298 
    299282            }
    300283        }
     
    318301                                         GET_PTR( chdev_xp ),
    319302                                         &inode_xp );
    320 
    321 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    322 __FUNCTION__ , chdev_ptr->name , local_cxy );
    323 
    324303            }
    325304        }
     
    343322                                         GET_PTR( chdev_xp ),
    344323                                         &inode_xp );
    345 
    346 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    347 __FUNCTION__ , chdev_ptr->name , local_cxy );
    348 
    349324            }
    350325        }
     
    368343                                         GET_PTR( chdev_xp ),
    369344                                         &inode_xp );
    370 
    371 devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
    372 __FUNCTION__ , chdev_ptr->name , local_cxy );
    373 
    374             }
    375         }
    376     }
     345            }
     346        }
     347    }
     348
     349#if CONFIG_DEBUG_DEVFS_INIT
     350cycle = (uint32_t)hal_get_cycles();
     351if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     352printk("\n[DBG] %s : thread %x exit at cycle %d\n",
     353__FUNCTION__ , CURRENT_THREAD , cycle );
     354#endif
     355
    377356}  // end devfs_local_init()
    378357
     
    396375    char               k_buf[CONFIG_TXT_KBUF_SIZE];  // local kernel buffer
    397376
    398 devfs_dmsg("\n[DBG] %s enter / cycle %d\n",
    399 __FUNCTION__ , hal_time_stamp() );
     377#if CONFIG_DEBUG_DEVFS_MOVE
     378uint32_t cycle = (uint32_t)hal_get_cycles();
     379if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     380printk("\n[DBG] %s : thread %x enter / to_mem %d / cycle %d\n",
     381__FUNCTION__ , CURRENT_THREAD , to_buffer , cycle );
     382#endif
    400383
    401384#if CONFIG_READ_DEBUG
     
    426409                if( error )
    427410                {
    428 
    429 devfs_dmsg("\n[DBG] %s exit error / cycle %d\n",
    430 __FUNCTION__ , hal_time_stamp() );
    431 
    432411                    return -1;
    433412                }
     
    438417             }
    439418
     419#if CONFIG_DEBUG_DEVFS_MOVE
     420cycle = (uint32_t)hal_get_cycles();
     421if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     422printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
 423__FUNCTION__ , CURRENT_THREAD , to_buffer , cycle );
     424#endif
     425
    440426#if CONFIG_READ_DEBUG
    441427exit_devfs_move = hal_time_stamp();
    442428#endif
    443 
    444 devfs_dmsg("\n[DBG] %s exit success / size = %d / cycle %d\n",
    445 __FUNCTION__ , size , hal_time_stamp() );
    446 
    447429            return size;
    448430        }
     
    454436            if( error )
    455437            {
    456 
    457 devfs_dmsg("\n[DBG] %s exit error / cycle %d\n",
    458 __FUNCTION__ , hal_time_stamp() );
    459 
    460438                return -1;
    461439            }
     
    463441            {
    464442
    465 devfs_dmsg("\n[DBG] %s exit success / size = %d / cycle %d\n",
    466 __FUNCTION__ , size , hal_time_stamp() );
     443#if CONFIG_DEBUG_DEVFS_MOVE
     444cycle = (uint32_t)hal_get_cycles();
     445if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     446printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
 447__FUNCTION__ , CURRENT_THREAD , to_buffer , cycle );
     448#endif
    467449
    468450                return size;
     
    477459        return -1;
    478460    }
     461
    479462}  // end devfs_user_move()
    480463
  • trunk/kernel/fs/vfs.c

    r430 r433  
    157157    error_t            error;
    158158
    159 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter / dentry = %x in cluster %x\n",
    160 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, GET_PTR(dentry_xp), GET_CXY(dentry_xp) );
     159#if CONFIG_DEBUG_VFS_INODE_CREATE
     160uint32_t cycle = (uint32_t)hal_get_cycles();
     161if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
     162printk("\n[DBG] %s : thread %x enter / dentry = %x in cluster %x / cycle %d\n",
     163__FUNCTION__, CURRENT_THREAD, GET_PTR(dentry_xp), GET_CXY(dentry_xp), cycle );
     164#endif
    161165 
    162166    // check fs type and get pointer on context
     
    230234    remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );
    231235
    232 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit / inode = %x in cluster %x\n",
    233 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy );
     236#if CONFIG_DEBUG_VFS_INODE_CREATE
     237uint32_t cycle = (uint32_t)hal_get_cycles();
     238if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
     239printk("\n[DBG] %s : thread %x exit / inode = %x in cluster %x / cycle %d\n",
     240__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
     241#endif
    234242 
    235243    // return extended pointer on inode
     
    263271                        xptr_t        child_xp )
    264272{
    265 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
    266 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , hal_time_stamp() );
     273
     274#if CONFIG_DEBUG_VFS_INODE_LOAD
     275uint32_t cycle = (uint32_t)hal_get_cycles();
     276if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
     277printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
     278__FUNCTION__, CURRENT_THREAD , name , cycle );
     279#endif
    267280
    268281    error_t error = 0;
     
    293306    }
    294307
    295 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / cycle %d\n",
    296 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , hal_time_stamp() );
     308#if CONFIG_DEBUG_VFS_INODE_LOAD
     309cycle = (uint32_t)hal_get_cycles();
     310if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
     311printk("\n[DBG] %s : thread %x exit for <%s> / cycle %d\n",
     312__FUNCTION__, CURRENT_THREAD , name , cycle );
     313#endif
    297314
    298315    return error;
     
    416433        kmem_req_t       req;        // request to kernel memory allocator
    417434
    418 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / parent inode = %x / cycle %d\n",
    419 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, name, parent, hal_time_stamp() );
     435#if CONFIG_DEBUG_VFS_DENTRY_CREATE
     436uint32_t cycle = (uint32_t)hal_get_cycles();
     437if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     438printk("\n[DBG] %s : thread %x enter for <%s> / parent_inode %x / cycle %d\n",
     439__FUNCTION__, CURRENT_THREAD , name , parent , cycle );
     440#endif
    420441
    421442    // get pointer on context
     
    465486    *dentry_xp = XPTR( local_cxy , dentry );
    466487
    467 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / dentry = %x in cluster %x / cycle %d\n",
    468 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, name, dentry, local_cxy , hal_time_stamp() );
     488#if CONFIG_DEBUG_VFS_DENTRY_CREATE
     489cycle = (uint32_t)hal_get_cycles();
     490if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     491printk("\n[DBG] %s : thread %x exit for <%s> / dentry %x / cycle %d\n",
     492__FUNCTION__, CURRENT_THREAD , name , dentry , cycle );
     493#endif
    469494
    470495    return 0;
     
    584609    uint32_t      file_id;      // created file descriptor index in reference fd_array
    585610
    586 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
    587 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path, (uint32_t)hal_time_stamp() );
     611#if CONFIG_DEBUG_VFS_OPEN
     612uint32_t cycle = (uint32_t)hal_get_cycles();
     613if( CONFIG_DEBUG_VFS_OPEN < cycle )
     614printk("\n[DBG] %s :  thread %x enter for <%s> / cycle %d\n",
     615__FUNCTION__, CURRENT_THREAD, path, cycle );
     616#endif
    588617
    589618    // compute lookup working mode
     
    610639    inode_ptr = (vfs_inode_t *)GET_PTR( inode_xp );
    611640   
    612 vfs_dmsg("\n[DBG] %s : core[%x,%d] found inode for <%s> in cluster %x / cycle %d\n",
    613 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path, inode_cxy , (uint32_t)hal_time_stamp() );
    614 
    615641    // create a new file descriptor in cluster containing inode
    616642    if( inode_cxy == local_cxy )      // target cluster is local
     
    630656    if( error ) return error;
    631657
    632 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / file = %x in cluster %x / cycle %d\n",
    633 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path,
    634 GET_PTR(file_xp), GET_CXY(file_xp), hal_time_stamp() );
     658#if CONFIG_DEBUG_VFS_OPEN
     659cycle = (uint32_t)hal_get_cycles();
     660if( CONFIG_DEBUG_VFS_OPEN < cycle )
     661printk("\n[DBG] %s : thread %x exit for <%s> / file %x in cluster %x / cycle %d\n",
     662__FUNCTION__, CURRENT_THREAD, path, GET_PTR(file_xp), GET_CXY(file_xp), cycle );
     663#endif
    635664
    636665    // success
     
    647676                   uint32_t size )
    648677{
    649     assert( ( file_xp != XPTR_NULL ) , __FUNCTION__ ,
    650     "file_xp == XPTR_NULL" );
     678    assert( ( file_xp != XPTR_NULL ) , __FUNCTION__ , "file_xp == XPTR_NULL" );
    651679
    652680    cxy_t              file_cxy;     // remote file descriptor cluster
     
    13191347    process = this->process;
    13201348
    1321 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
    1322 __FUNCTION__ , local_cxy , this->core->lid , pathname , hal_time_stamp() );
     1349#if CONFIG_DEBUG_VFS_LOOKUP
     1350uint32_t cycle = (uint32_t)hal_get_cycles();
     1351if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1352printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
 1353__FUNCTION__, CURRENT_THREAD, pathname, cycle );
     1354#endif
    13231355
    13241356    // get extended pointer on first inode to search
     
    13431375        vfs_get_name_from_path( current , name , &next , &last );
    13441376
    1345 vfs_dmsg("\n[DBG] %s : core[%x,%d] look for <%s> / last = %d\n",
    1346 __FUNCTION__ , local_cxy , this->core->lid , name , last );
     1377#if (CONFIG_DEBUG_VFS_LOOKUP & 1)
     1378if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1379printk("\n[DBG] %s : look for <%s> / last = %d\n", __FUNCTION__ , name , last );
     1380#endif
    13471381
    13481382        // search a child dentry matching name in parent inode
     
    13621396        {
    13631397
    1364 vfs_dmsg("\n[DBG] %s : core[%x,%d] miss <%s> => load it\n",
    1365 __FUNCTION__ , local_cxy , this->core->lid , name );
     1398#if (CONFIG_DEBUG_VFS_LOOKUP & 1)
     1399if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1400printk("\n[DBG] %s : miss <%s> => load it\n", __FUNCTION__ , name );
     1401#endif
    13661402
    13671403            // release lock on parent inode
     
    14461482            vfs_inode_lock( parent_xp );
    14471483
    1448 vfs_dmsg("\n[DBG] %s : core[%x,%d] created node <%s>\n",
    1449 __FUNCTION__ , local_cxy , this->core->lid , name );
     1484#if (CONFIG_DEBUG_VFS_LOOKUP & 1)
     1485if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1486printk("\n[DBG] %s : created node <%s>\n", __FUNCTION__ , name );
     1487#endif
    14501488
    14511489        }
    14521490
    1453 vfs_dmsg("\n[DBG] %s : core[%x,%d] found <%s> / inode = %x in cluster %x\n",
    1454 __FUNCTION__ , local_cxy , this->core->lid , name , GET_PTR(child_xp) , GET_CXY(child_xp) );
     1491#if (CONFIG_DEBUG_VFS_LOOKUP & 1)
     1492if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1493printk("\n[DBG] %s : found <%s> / inode %x in cluster %x\n",
     1494__FUNCTION__ , name , GET_PTR(child_xp) , GET_CXY(child_xp) );
     1495#endif
    14551496
    14561497        // TODO check access rights here [AG]
     
    14771518    vfs_inode_unlock( parent_xp );
    14781519
    1479 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / inode = %x in cluster %x\n",
    1480 __FUNCTION__,local_cxy,this->core->lid,pathname,GET_PTR(child_xp),GET_CXY(child_xp) );
     1520#if CONFIG_DEBUG_VFS_LOOKUP
     1521cycle = (uint32_t)hal_get_cycles();
     1522if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1523printk("\n[DBG] %s : thread %x exit for <%s> / inode %x in cluster %x / cycle %d\n",
 1524__FUNCTION__, CURRENT_THREAD, pathname, GET_PTR(child_xp), GET_CXY(child_xp), cycle );
     1525#endif
    14811526
    14821527    // return searched pointer
     
    15021547    // we use two variables "index" and "count" because the buffer
    15031548    // is written in decreasing index order (from leaf to root)
    1504     // TODO : handle conflict with a concurrent rename [AG]
    1505     // TODO : handle synchro in the loop  [AG]
     1549    // TODO  : handle conflict with a concurrent rename [AG]
     1550    // FIXME : handle synchro in the loop  [AG]
    15061551
    15071552        // set the NUL character in buffer / initialise buffer index and count
     
    15761621    parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );
    15771622
    1578 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / child_cxy = %x / parent_cxy = %x\n",
    1579 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , child_cxy , parent_cxy );
     1623#if CONFIG_DEBUG_VFS_ADD_CHILD
     1624uint32_t cycle = (uint32_t)hal_get_cycles();
     1625if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1626printk("\n[DBG] %s : thread %x enter for <%s> / child_cxy = %x / parent_cxy = %x\n",
     1627__FUNCTION__ , CURRENT_THREAD , name , child_cxy , parent_cxy );
     1628#endif
    15801629
    15811630    // 1. create dentry
     
    15871636                                   &dentry_xp );
    15881637
    1589 vfs_dmsg("\n[DBG] %s : dentry <%s> created in local cluster %x\n",
    1590 __FUNCTION__ , name , local_cxy );
     1638#if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
     1639if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1640printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, local_cxy );
     1641#endif
    15911642
    15921643    }
     
    16001651                                      &error );
    16011652
    1602 vfs_dmsg("\n[DBG] %s : dentry <%s> created in remote cluster %x\n",
    1603 __FUNCTION__ , name , parent_cxy );
     1653#if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
     1654if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1655printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, parent_cxy );
     1656#endif
    16041657
    16051658    }
     
    16291682                                  gid,
    16301683                                  &inode_xp );
     1684
     1685#if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
     1686if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1687printk("\n[DBG] %s : inode <%x> created in cluster %x\n",
     1688__FUNCTION__ , GET_PTR(inode_xp) , local_cxy );
     1689#endif
    16311690
    16321691vfs_dmsg("\n[DBG] %s : inode %x created in local cluster %x\n",
     
    16481707                                     &error );
    16491708
    1650 vfs_dmsg("\n[DBG] %s : inode %x created in remote cluster %x\n",
     1709#if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
     1710if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
 1711printk("\n[DBG] %s : inode <%x> created in cluster %x\n",
    16511712__FUNCTION__ , GET_PTR(inode_xp) , child_cxy );
     1713#endif
    16521714
    16531715    }
     
    16691731    hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );
    16701732
    1671 vfs_dmsg("\n[DBG] %s : exit in cluster %x for <%s>\n",
    1672 __FUNCTION__ , local_cxy , name );
     1733#if CONFIG_DEBUG_VFS_ADD_CHILD
     1734cycle = (uint32_t)hal_get_cycles();
     1735if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1736printk("\n[DBG] %s : thread %x exit for <%s>\n",
     1737__FUNCTION__ , CURRENT_THREAD , name );
     1738#endif
    16731739
    16741740    // success : return extended pointer on child inode
     
    16941760    assert( (mapper != NULL) , __FUNCTION__ , "no mapper for page\n" );
    16951761
    1696 vfs_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper = %x / inode = %x\n",
    1697 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , page->index , mapper, mapper->inode );
     1762#if CONFIG_DEBUG_VFS_MAPPER_MOVE
     1763uint32_t cycle = (uint32_t)hal_get_cycles();
     1764if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1765printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / inode %x / cycle %d\n",
     1766__FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
     1767#endif
    16981768
    16991769    // get FS type
     
    17201790    }
    17211791
    1722 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for page %d / mapper = %x / inode = %x\n",
    1723 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, page->index, mapper, mapper->inode );
     1792#if CONFIG_DEBUG_VFS_MAPPER_MOVE
     1793cycle = (uint32_t)hal_get_cycles();
     1794if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1795printk("\n[DBG] %s : thread %x exit for page %d / mapper %x / inode %x / cycle %d\n",
     1796__FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
     1797#endif
    17241798
    17251799    return error;
     
    17401814    assert( (mapper != NULL) , __FUNCTION__ , "mapper pointer is NULL\n" );
    17411815
    1742 vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for inode %x in cluster %x/ cycle %d\n",
    1743 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy , hal_time_stamp() );
     1816#if CONFIG_DEBUG_VFS_MAPPER_LOAD
     1817uint32_t cycle = (uint32_t)hal_get_cycles();
 1818if( CONFIG_DEBUG_VFS_MAPPER_LOAD < cycle )
     1819printk("\n[DBG] %s : thread %x enter for inode %x in cluster %x / cycle %d\n",
     1820__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
     1821#endif
    17441822
    17451823    // compute number of pages
     
    17571835    }
    17581836
    1759 vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for inode %x in cluster %x / cycle %d\n",
    1760 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy , hal_time_stamp() );
     1837#if CONFIG_DEBUG_VFS_MAPPER_LOAD
     1838cycle = (uint32_t)hal_get_cycles();
 1839if( CONFIG_DEBUG_VFS_MAPPER_LOAD < cycle )
     1840printk("\n[DBG] %s : thread %x exit for inode %x in cluster %x / cycle %d\n",
     1841__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
     1842#endif
    17611843
    17621844    return 0;
  • trunk/kernel/kern/chdev.c

    r428 r433  
    129129    thread_t * this = CURRENT_THREAD;
    130130
    131 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enter / cycle %d\n",
    132 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
     131#if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
     132uint32_t cycle = (uint32_t)hal_get_cycles();
     133if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
     134printk("\n[DBG] %s : client_thread %x (%s) enter / cycle %d\n",
     135__FUNCTION__, this, thread_type_str(this->type) , cycle );
     136#endif
    133137
    134138    // get device descriptor cluster and local pointer
     
    142146    // get local pointer on server thread
    143147    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
    144 
    145 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) / server_cxy %x / server_ptr %x / server_type %\n",
    146 __FUNCTION__, local_cxy, this->core->lid, server_cxy, server_ptr,
    147 thread_type_str( hal_remote_lw( XPTR( server_cxy , &server_ptr->type) ) ) );
    148148
    149149    // build extended pointer on chdev lock protecting queue
     
    178178    if( different ) dev_pic_send_ipi( chdev_cxy , lid );
    179179   
    180 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n",
    181 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
     180#if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
     181cycle = (uint32_t)hal_get_cycles();
     182if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
     183printk("\n[DBG] %s : client_thread %x (%s) exit / cycle %d\n",
     184__FUNCTION__, this, thread_type_str(this->type) , cycle );
     185#endif
    182186
    183187    // deschedule
     
    185189    sched_yield("blocked on I/O");
    186190
    187 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle %d\n",
    188 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
    189 
    190191    // exit critical section
    191192    hal_restore_irq( save_sr );
     193
     194#if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
     195cycle = (uint32_t)hal_get_cycles();
     196if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
     197printk("\n[DBG] %s : client_thread %x (%s) resumes / cycle %d\n",
     198__FUNCTION__, this, thread_type_str(this->type) , cycle );
     199#endif
    192200
    193201#if CONFIG_READ_DEBUG
     
    209217    server = CURRENT_THREAD;
    210218
    211 chdev_dmsg("\n[DBG] %s : enter / server = %x / chdev = %x / cycle %d\n",
    212 __FUNCTION__ , server , chdev , hal_time_stamp() );
     219#if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
     220uint32_t cycle = (uint32_t)hal_get_cycles();
     221if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
     222printk("\n[DBG] %s : server_thread %x enter / chdev = %x / cycle %d\n",
     223__FUNCTION__ , server , chdev , cycle );
     224#endif
    213225
    214226    root_xp = XPTR( local_cxy , &chdev->wait_root );
     
    265277            thread_unblock( client_xp , THREAD_BLOCKED_IO );
    266278
    267 chdev_dmsg("\n[DBG] %s : thread %x complete operation for client %x / cycle %d\n",
    268 __FUNCTION__ , server , client_ptr , hal_time_stamp() );
     279#if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
     280cycle = (uint32_t)hal_get_cycles();
     281if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
     282printk("\n[DBG] %s : server_thread %x complete operation for client %x / cycle %d\n",
     283__FUNCTION__ , server , client_ptr , cycle );
     284#endif
    269285
    270286#if CONFIG_READ_DEBUG
  • trunk/kernel/kern/cluster.c

    r428 r433  
    8989        spinlock_init( &cluster->kcm_lock );
    9090
    91 cluster_dmsg("\n[DBG] %s for cluster %x enters\n",
    92 __FUNCTION__ , local_cxy );
     91#if CONFIG_DEBUG_CLUSTER_INIT
     92uint32_t cycle = (uint32_t)hal_get_cycles();
     93if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     94printk("\n[DBG] %s enters for cluster %x / cycle %d\n",
     95__FUNCTION__ , local_cxy , cycle );
     96#endif
    9397
    9498    // initialises DQDT
     
    109113    }
    110114
    111 cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x at cycle %d\n",
    112 __FUNCTION__ , local_cxy , hal_get_cycles() );
     115#if CONFIG_DEBUG_CLUSTER_INIT
     116cycle = (uint32_t)hal_get_cycles();
     117if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
 118printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
     119__FUNCTION__ , local_cxy , cycle );
     120#endif
    113121
    114122    // initialises embedded KHM
     
    132140        }
    133141
    134 cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x at cycle %d\n",
    135 __FUNCTION__ , local_cxy , hal_get_cycles() );
     142#if CONFIG_DEBUG_CLUSTER_INIT
     143cycle = (uint32_t)hal_get_cycles();
     144if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
 145printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
     146__FUNCTION__ , local_cxy , cycle );
     147#endif
    136148
    137149    // initialises RPC fifo
     
    164176    }
    165177
    166 cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x at cycle %d\n",
    167 __FUNCTION__ , local_cxy , hal_get_cycles() );
     178#if CONFIG_DEBUG_CLUSTER_INIT
     179cycle = (uint32_t)hal_get_cycles();
     180if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
 181printk("\n[DBG] %s Process Manager initialized in cluster %x / cycle %d\n",
     182__FUNCTION__ , local_cxy , cycle );
     183#endif
    168184
    169185    hal_fence();
     
    215231//  Process related functions
    216232////////////////////////////////////////////////////////////////////////////////////
     233
     234
     235//////////////////////////////////////////////////////
     236xptr_t cluster_get_owner_process_from_pid( pid_t pid )
     237{
     238    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
 239    xptr_t      lock_xp;       // xptr on lock protecting this list
     240    xptr_t      iter_xp;       // iterator
     241    xptr_t      current_xp;    // xptr on current process descriptor
     242    process_t * current_ptr;   // local pointer on current process
     243    pid_t       current_pid;   // current process identifier
     244    bool_t      found;
     245
     246    cluster_t * cluster = LOCAL_CLUSTER;
     247
     248    // get owner cluster and lpid
     249    cxy_t  owner_cxy = CXY_FROM_PID( pid );
     250
     251    // get lock & root of list of process in owner cluster
     252    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
     253    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
     254
     255    // take the lock protecting the list of processes
     256    remote_spinlock_lock( lock_xp );
     257
     258    // scan list of processes in owner cluster
     259    found = false;
     260    XLIST_FOREACH( root_xp , iter_xp )
     261    {
     262        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
     263        current_ptr = GET_PTR( current_xp );
     264        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
     265
     266        if( current_pid == pid )
     267        {
     268            found = true;
     269            break;
     270        }
     271    }
     272
     273    // release the lock protecting the list of processes
     274    remote_spinlock_unlock( lock_xp );
     275
     276    // return extended pointer on process descriptor in owner cluster
     277    if( found ) return current_xp;
     278    else        return XPTR_NULL;
     279}
    217280
    218281//////////////////////////////////////////////////////////
     
    442505
    443506    // skip one line
    444     printk("\n");
     507    printk("\n***** processes in cluster %x / cycle %d\n", cxy , (uint32_t)hal_get_cycles() );
    445508
    446509    // loop on all reference processes in cluster cxy
  • trunk/kernel/kern/cluster.h

    r428 r433  
    189189
    190190/******************************************************************************************
     191 * This function returns an extended pointer on the process descriptor in owner cluster
 192 * from the process PID. This PID can be different from the calling process PID.
 193 * It can be called by any thread running in any cluster.
     194 ******************************************************************************************
     195 * @ pid  : process identifier.
     196 * @ return extended pointer on owner process if found / XPTR_NULL if not found.
     197 *****************************************************************************************/
     198xptr_t cluster_get_owner_process_from_pid( pid_t pid );
     199
     200/******************************************************************************************
    191201 * This function returns an extended pointer on the reference process descriptor
    192202 * from the process PID. This PID can be be different from the calling process PID.
     
    194204 ******************************************************************************************
    195205 * @ pid  : process identifier.
    196  * @ return extended pointer on reference process if success / return XPTR_NULL if error.
     206 * @ return extended pointer on reference process if found / XPTR_NULL if not found.
    197207 *****************************************************************************************/
    198208xptr_t cluster_get_reference_process_from_pid( pid_t pid );
  • trunk/kernel/kern/core.c

    r409 r433  
    7575}
    7676
    77 /* deprecated 14/08/2017 [AG]
    78 //////////////////////////////////////
    79 void core_time_update( core_t * core )
    80 {
    81         uint32_t elapsed;
    82         uint32_t ticks_nr   = core->ticks_nr;
    83         uint64_t cycles     = core->cycles;
    84         uint32_t time_stamp = core->time_stamp;
    85         uint32_t time_now   = hal_get_cycles();
    86 
    87         // compute number of elapsed cycles taking into account 32 bits register wrap
    88         if( time_now < time_stamp ) elapsed = (0xFFFFFFFF - time_stamp) + time_now;
    89         else                        elapsed = time_now - time_stamp;
    90 
    91         cycles  += elapsed;
    92         ticks_nr = elapsed / core->ticks_period;
    93 
    94         core->time_stamp     = time_now;
    95         core->cycles         = cycles + elapsed;
    96         core->ticks_nr       = ticks_nr + (elapsed / core->ticks_period);
    97         hal_fence();
    98 }
    99 */
    100 
    10177////////////////////////////////
    10278void core_clock( core_t * core )
     
    136112        hal_fence();
    137113
    138 #if CONFIG_SHOW_CPU_USAGE
    139         printk(INFO, "INFO: core %d in cluster %x : busy_percent = %d / cumulated_usage = %d\n",
    140                core->lid, local_cxy , busy_percent , usage );
    141 #endif
    142 
    143114        core->ticks_nr = 0;
    144115        idle->ticks_nr = 0;
  • trunk/kernel/kern/process.c

    r428 r433  
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016,2017)
     6 *          Alain Greiner (2016,2017,2018)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    124124    model_pid  = hal_remote_lw( XPTR( model_cxy  , &model_ptr->pid ) );
    125125
    126 process_dmsg("\n[DBG] %s : core[%x,%d] enters / pid = %x / ppid = %x / model_pid = %x\n",
    127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , parent_pid , model_pid );
     126#if CONFIG_DEBUG_PROCESS_REFERENCE_INIT
     127uint32_t cycle = (uint32_t)hal_get_cycles();
     128if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     129printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
     130__FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );
     131#endif
    128132
    129133    // initialize PID, REF_XP, PARENT_XP, and STATE
    130         process->pid       = pid;
    131     process->ref_xp    = XPTR( local_cxy , process );
    132     process->parent_xp = parent_xp;
    133     process->state     = PROCESS_STATE_RUNNING;
     134        process->pid        = pid;
     135    process->ref_xp     = XPTR( local_cxy , process );
     136    process->parent_xp  = parent_xp;
     137    process->term_state = 0;
    134138
    135139    // initialize vmm as empty
     
    137141    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );
    138142 
    139 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm inialised as empty for process %x\n",
    140 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     143#if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
     144cycle = (uint32_t)hal_get_cycles();
     145if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     146printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n",
     147__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     148#endif
    141149
    142150    // initialize fd_array as empty
     
    224232    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
    225233
    226 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n",
    227 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     234#if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
     235cycle = (uint32_t)hal_get_cycles();
     236if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     237printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
     238__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     239#endif
    228240
    229241    // reset children list root
     
    260272        hal_fence();
    261273
    262 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    263 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     274#if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
     275cycle = (uint32_t)hal_get_cycles();
     276if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     277printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
     278__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     279#endif
    264280
    265281}  // process_reference_init()
     
    276292
    277293    // initialize PID, REF_XP, PARENT_XP, and STATE
    278     local_process->pid       = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
    279     local_process->parent_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
    280     local_process->ref_xp    = reference_process_xp;
    281     local_process->state     = PROCESS_STATE_RUNNING;
    282 
    283 process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    284 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
     294    local_process->pid        = hal_remote_lw(  XPTR( ref_cxy , &ref_ptr->pid ) );
     295    local_process->parent_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
     296    local_process->ref_xp     = reference_process_xp;
     297    local_process->term_state = 0;
     298
     299#if CONFIG_DEBUG_PROCESS_COPY_INIT
     300uint32_t cycle = (uint32_t)hal_get_cycles();
     301if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     302printk("\n[DBG] %s : thread %x enter for process %x\n",
     303__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     304#endif
    285305
    286306    // reset local process vmm
     
    327347        hal_fence();
    328348
    329 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    330 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
     349#if CONFIG_DEBUG_PROCESS_COPY_INIT
     350cycle = (uint32_t)hal_get_cycles();
     351if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     352printk("\n[DBG] %s : thread %x exit for process %x\n",
     353__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     354#endif
    331355
    332356    return 0;
     
    347371    "process %x in cluster %x has still active threads", process->pid , local_cxy );
    348372
    349 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n",
    350 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     373#if CONFIG_DEBUG_PROCESS_DESTROY
     374uint32_t cycle = (uint32_t)hal_get_cycles();
     375if( CONFIG_DEBUG_PROCESS_DESTROY )
     376printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
     377__FUNCTION__ , CURRENT_THREAD , process, process->pid , cycle );
     378#endif
    351379
    352380    // get local process manager pointer
     
    386414        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
    387415        remote_spinlock_unlock( children_lock_xp );
    388 
    389         // get extende pointer on parent main thread
    390         parent_thread_xp = XPTR( parent_cxy ,
    391                            hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[1] )));
    392        
    393         // unblock parent process main thread
    394         thread_unblock( parent_thread_xp , THREAD_BLOCKED_WAIT );
    395416    }
    396417
     
    411432    process_free( process );
    412433
    413 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    414 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     434#if CONFIG_DEBUG_PROCESS_DESTROY
     435cycle = (uint32_t)hal_get_cycles();
     436if( CONFIG_DEBUG_PROCESS_DESTROY )
     437printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n",
     438__FUNCTION__ , CURRENT_THREAD , process, process->pid, cycle );
     439#endif
    415440
    416441}  // end process_destroy()
     
    440465    uint32_t           responses;         // number of remote process copies
    441466    uint32_t           rsp_count;         // used to assert number of copies
    442 
    443467    rpc_desc_t         rpc;               // rpc descriptor allocated in stack
    444468
    445 process_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x\n",
    446 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );
     469#if CONFIG_DEBUG_PROCESS_SIGACTION
     470uint32_t cycle = (uint32_t)hal_get_cycles();
     471if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     472printk("\n[DBG] %s : thread %x enter to %s process %x in cluster %x / cycle %d\n",
     473__FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) ,
     474process->pid , local_cxy , cycle );
     475#endif
    447476
    448477    thread_t         * client = CURRENT_THREAD;
    449     xptr_t             client_xp = XPTR( local_cxy , client );
    450478
    451479    // get local pointer on local cluster manager
     
    492520        {
    493521
    494 process_dmsg("\n[DBG] %s : send RPC to remote cluster %x\n",
    495 __FUNCTION__ , process_cxy );
     522#if CONFIG_DEBUG_PROCESS_SIGACTION
     523if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     524printk("\n[DBG] %s : send RPC to remote cluster %x\n", __FUNCTION__ , process_cxy );
     525#endif
    496526
    497527            rpc.args[0] = (uint64_t)action_type;
     
    517547    }
    518548
    519 process_dmsg("\n[DBG] %s : make action in owner cluster %x\n",
    520 __FUNCTION__ , local_cxy );
    521 
     549#if CONFIG_DEBUG_PROCESS_SIGACTION
     550if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     551printk("\n[DBG] %s : make action in owner cluster %x\n", __FUNCTION__ , local_cxy );
     552#endif
    522553
    523554    // call directly the relevant function in local owner cluster
    524     if      (action_type == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
    525     else if (action_type == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
    526     else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process             );
    527 
    528 process_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x\n",
    529 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );
     555    if      (action_type == DELETE_ALL_THREADS  ) process_delete_threads ( process );
     556    else if (action_type == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
     557    else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
     558
     559#if CONFIG_DEBUG_PROCESS_SIGACTION
     560cycle = (uint32_t)hal_get_cycles();
     561if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     562printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
     563__FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) ,
     564process->pid , local_cxy , cycle );
     565#endif
    530566
    531567}  // end process_sigaction()
    532568
    533 ////////////////////////////////////////////////
    534 void process_block_threads( process_t * process,
    535                             xptr_t      client_xp )
     569/////////////////////////////////////////////////
     570void process_block_threads( process_t * process )
    536571{
    537572    thread_t          * target;         // pointer on target thread
     573    thread_t          * this;           // pointer on calling thread
    538574    uint32_t            ltid;           // index in process th_tbl
    539     thread_t          * requester;      // requesting thread pointer
    540575    uint32_t            count;          // requests counter
    541576    volatile uint32_t   rsp_count;      // responses counter
    542577
    543578    // get calling thread pointer
    544     requester = CURRENT_THREAD;
    545 
    546 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
    547 __FUNCTION__ , process->pid , local_cxy );
     579    this = CURRENT_THREAD;
     580
     581#if CONFIG_DEBUG_PROCESS_SIGACTION
     582uint32_t cycle = (uint32_t)hal_get_cycles();
     583if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     584printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
     585__FUNCTION__ , this , process->pid , local_cxy , cycle );
     586#endif
    548587
    549588    // get lock protecting process th_tbl[]
     
    559598        target = process->th_tbl[ltid];
    560599
     600        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
     601
    561602        if( target != NULL )             // thread found
    562603        {
    563604            count++;
    564605
    565             // - if the target thread is the client thread, we do nothing,
    566             //   and we simply decrement the responses counter.
    567606            // - if the calling thread and the target thread are on the same core,
    568607            //   we block the target thread, we don't need confirmation from scheduler,
     
    572611            //   to be sure that the target thread is not running.
    573612           
    574             if( XPTR( local_cxy , target ) == client_xp )
    575             {
    576                 // decrement responses counter
    577                 hal_atomic_add( (void *)&rsp_count , -1 );
    578             }
    579             else if( requester->core->lid == target->core->lid )
     613            if( this->core->lid == target->core->lid )
    580614            {
    581615                // set the global blocked bit in target thread descriptor.
     
    612646    }
    613647
    614 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
    615 __FUNCTION__ , process->pid , local_cxy , count );
     648#if CONFIG_DEBUG_PROCESS_SIGACTION
     649cycle = (uint32_t)hal_get_cycles();
     650if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     651printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
     652__FUNCTION__ , this , process->pid , local_cxy , cycle );
     653#endif
    616654
    617655}  // end process_block_threads()
     
    621659{
    622660    thread_t          * target;        // pointer on target thead
     661    thread_t          * this;          // pointer on calling thread
    623662    uint32_t            ltid;          // index in process th_tbl
    624663    uint32_t            count;         // requests counter
    625664
    626 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
    627 __FUNCTION__ , process->pid , local_cxy );
     665    // get calling thread pointer
     666    this = CURRENT_THREAD;
     667
     668#if CONFIG_DEBUG_PROCESS_SIGACTION
     669uint32_t cycle = (uint32_t)hal_get_cycles();
     670if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     671printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
     672__FUNCTION__ , this , process->pid , local_cxy , cycle );
     673#endif
    628674
    629675    // get lock protecting process th_tbl[]
     
    636682        target = process->th_tbl[ltid];
    637683
     684        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
     685
    638686        if( target != NULL )             // thread found
    639687        {
     
    648696    spinlock_unlock( &process->th_lock );
    649697
    650 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
    651 __FUNCTION__ , process->pid , local_cxy , count );
     698#if CONFIG_DEBUG_PROCESS_SIGACTION
     699cycle = (uint32_t)hal_get_cycles();
     700if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     701printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
     702__FUNCTION__ , this , process->pid , local_cxy , cycle );
     703#endif
    652704
    653705}  // end process_unblock_threads()
    654706
    655 /////////////////////////////////////////////////
    656 void process_delete_threads( process_t * process,
    657                              xptr_t      client_xp )
     707//////////////////////////////////////////////////
     708void process_delete_threads( process_t * process )
    658709{
    659710    thread_t          * target;        // pointer on target thread
     711    thread_t          * this;          // pointer on calling thread
    660712    uint32_t            ltid;          // index in process th_tbl
    661713    uint32_t            count;         // request counter
    662 
    663 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n",
    664 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
     714    cxy_t               owner_cxy;     // owner cluster identifier
     715
     716    // get calling thread pointer
     717    this = CURRENT_THREAD;
     718    owner_cxy = CXY_FROM_PID( process->pid );
     719
     720#if CONFIG_DEBUG_PROCESS_SIGACTION
     721uint32_t cycle = (uint32_t)hal_get_cycles();
     722if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     723printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
     724__FUNCTION__ , this , process->pid , local_cxy , cycle );
     725#endif
    665726
    666727    // get lock protecting process th_tbl[]
     
    673734        target = process->th_tbl[ltid];
    674735
    675         if( target != NULL )             // thread found
     736        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
     737
     738        if( target != NULL )            // thread found
    676739        {
    677740            count++;
    678 
    679             // delete only if the target is not the client
    680             if( XPTR( local_cxy , target ) != client_xp )
    681             { 
     741           
     742            // the main thread should not be deleted
     743            if( (owner_cxy != local_cxy) || (ltid != 0) ) 
     744            {
    682745                hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
    683746            }
     
    688751    spinlock_unlock( &process->th_lock );
    689752
    690 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n",
    691 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
     753#if CONFIG_DEBUG_PROCESS_SIGACTION
     754cycle = (uint32_t)hal_get_cycles();
     755if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     756printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
     757__FUNCTION__ , this , process->pid , local_cxy , cycle );
     758#endif
    692759
    693760}  // end process_delete_threads()
     
    9881055    "parent process must be the reference process\n" );
    9891056
    990 fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
    991 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
     1057#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1058uint32_t cycle = (uint32_t)hal_get_cycles();
     1059if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1060printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     1061__FUNCTION__, CURRENT_THREAD, parent_pid, cycle );
     1062#endif
    9921063
    9931064    // allocate a process descriptor
     
    9991070        return -1;
    10001071    }
    1001 
    1002 fork_dmsg("\n[DBG] %s : core[%x,%d] created child process %x at cycle %d\n",
    1003  __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process, (uint32_t)hal_get_cycles() );
    10041072
    10051073    // allocate a child PID from local cluster
     
    10121080        return -1;
    10131081    }
    1014 
    1015 fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID = %x at cycle %d\n",
    1016  __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );
    10171082
    10181083    // initializes child process descriptor from parent process descriptor
     
    10221087                            parent_process_xp );
    10231088
    1024 fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
    1025 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
     1089#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1090cycle = (uint32_t)hal_get_cycles();
     1091if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1092printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n",
     1093__FUNCTION__, CURRENT_THREAD, process, new_pid, cycle );
     1094#endif
    10261095
    10271096    // copy VMM from parent descriptor to child descriptor
     
    10371106    }
    10381107
    1039 fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
    1040 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
     1108#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1109cycle = (uint32_t)hal_get_cycles();
     1110if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1111printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n",
     1112__FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle );
     1113#endif
    10411114
    10421115    // update extended pointer on .elf file
     
    10591132    assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    10601133
    1061 fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n",
    1062 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    1063 
    1064     // update parent process GPT to set Copy_On_Write for shared data vsegs
     1134#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1135cycle = (uint32_t)hal_get_cycles();
     1136if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1137printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n",
     1138__FUNCTION__ , CURRENT_THREAD, thread, cycle );
     1139#endif
     1140
     1141    // set Copy_On_Write flag in parent process GPT
    10651142    // this includes all replicated GPT copies
    10661143    if( parent_process_cxy == local_cxy )   // reference is local
     
    10741151    }
    10751152
    1076 fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
    1077 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
     1153    // set Copy_On_Write flag in child process GPT
     1154    vmm_set_cow( process );
     1155 
     1156#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1157cycle = (uint32_t)hal_get_cycles();
     1158if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1159printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n",
     1160__FUNCTION__ , CURRENT_THREAD, cycle );
     1161#endif
    10781162
    10791163    // get extended pointers on parent children_root, children_lock and children_nr
     
    10921176    *child_pid    = new_pid;
    10931177
    1094 
    1095 fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
    1096 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
     1178#if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1179cycle = (uint32_t)hal_get_cycles();
     1180if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1181printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1182__FUNCTION__, CURRENT_THREAD, cycle );
     1183#endif
    10971184
    10981185    return 0;
     
    11051192{
    11061193    char           * path;                    // pathname to .elf file
    1107     pid_t            pid;                     // old_process PID given to new_process
    1108     pid_t            temp_pid;                // temporary PID given to old_process
     1194    pid_t            pid;                     // old_process PID / given to new_process
     1195    pid_t            temp_pid;                // temporary PID / given to old_process
    11091196    process_t      * old_process;             // local pointer on old process
     1197    thread_t       * old_thread;              // local pointer on old thread
    11101198    process_t      * new_process;             // local pointer on new process
    1111     thread_t       * new_thread;              // local pointer on main thread
    1112     pthread_attr_t   attr;                    // main thread attributes
     1199    thread_t       * new_thread;              // local pointer on new thread
     1200    xptr_t           parent_xp;               // extended pointer on parent process
     1201    pthread_attr_t   attr;                    // new thread attributes
    11131202    lid_t            lid;                     // selected core local index
    11141203        error_t          error;
    11151204
    1116         // get .elf pathname and PID from exec_info
     1205    // get old_thread / old_process / PID / parent_xp
     1206    old_thread  = CURRENT_THREAD;
     1207    old_process = old_thread->process;
     1208    pid         = old_process->pid;
     1209    parent_xp   = old_process->parent_xp;
     1210   
     1211        // get .elf pathname from exec_info
    11171212        path     = exec_info->path;
    1118     pid      = exec_info->pid;
    11191213
    11201214    // this function must be executed by a thread running in owner cluster
    11211215    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
    1122     "local cluster %x is not owner for process %x\n", local_cxy, pid );
    1123 
    1124 exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / %s / cycle %d\n",
    1125 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path, (uint32_t)hal_get_cycles() );
    1126 
    1127     // get old_process local pointer
    1128     old_process = (process_t *)cluster_get_local_process_from_pid( pid );
    1129    
    1130     if( old_process == NULL )
    1131     {
    1132         printk("\n[ERROR] in %s : cannot get old process descriptor\n", __FUNCTION__ );
    1133         return -1;
    1134     }
     1216    "local_cluster must be owner_cluster\n" );
     1217
     1218    assert( (LTID_FROM_TRDID( old_thread->trdid ) == 0) , __FUNCTION__,
     1219    "must be called by the main thread\n" );
     1220 
     1221#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1222uint32_t cycle = (uint32_t)hal_get_cycles();
     1223if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1224printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n",
     1225__FUNCTION__, old_thread, pid, path, cycle );
     1226#endif
    11351227
    11361228     // allocate memory for new_process descriptor
     
    11441236    }
    11451237
    1146     // get a new PID for old_process
     1238    // get a temporary PID for old_process
    11471239    error = cluster_pid_alloc( old_process , &temp_pid );
    11481240    if( error )
     
    11541246    }
    11551247
    1156     // request blocking for all threads in old_process (but the calling thread)
    1157     process_sigaction( old_process , BLOCK_ALL_THREADS );
    1158 
    1159     // request destruction for all threads in old_process (but the calling thread)
    1160     process_sigaction( old_process , DELETE_ALL_THREADS );
    1161 
    1162 exec_dmsg("\n[DBG] %s : core[%x,%d] marked old threads for destruction / cycle %d\n",
    1163 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
    1164 
    1165     // set new PID to old_process
     1248    // set temporary PID to old_process
    11661249    old_process->pid = temp_pid;
    11671250
     
    11691252    process_reference_init( new_process,
    11701253                            pid,
    1171                             old_process->parent_xp,             // parent_process_xp
    1172                             XPTR(local_cxy , old_process) );    // model_process_xp
     1254                            parent_xp,                          // parent_process_xp
     1255                            XPTR(local_cxy , old_process) );    // model_process
    11731256
    11741257    // give TXT ownership to new_process
    11751258    process_txt_set_ownership( XPTR( local_cxy , new_process ) );
    11761259
    1177 exec_dmsg("\n[DBG] %s : core[%x,%d] initialised new process %x / cycle %d \n",
    1178 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() );
     1260#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1261cycle = (uint32_t)hal_get_cycles();
     1262if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1263printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n",
     1264__FUNCTION__ , old_thread , new_process , cycle );
     1265#endif
    11791266
    11801267    // register code & data vsegs as well as entry-point in new process VMM,
     
    11881275        }
    11891276
    1190 exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered in new process %x / cycle %d\n",
    1191 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() );
     1277#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1278cycle = (uint32_t)hal_get_cycles();
     1279if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1280printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n",
     1281__FUNCTION__, old_thread , new_process->pid , cycle );
     1282#endif
    11921283
    11931284    // select a core in local cluster to execute the main thread
     
    12161307    assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    12171308
    1218 exec_dmsg("\n[DBG] %s : core[%x,%d] created new_process main thread / cycle %d\n",
    1219 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    1220 
    1221     // get pointers on parent process
    1222     xptr_t      parent_xp  = new_process->parent_xp;
     1309#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1310cycle = (uint32_t)hal_get_cycles();
     1311if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1312printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n",
     1313__FUNCTION__ , old_thread , new_thread , cycle );
     1314#endif
     1315
     1316    // get cluster and local pointer on parent process
    12231317    process_t * parent_ptr = GET_PTR( parent_xp );
    12241318    cxy_t       parent_cxy = GET_CXY( parent_xp );
     
    12351329    remote_spinlock_unlock( lock_xp );
    12361330
    1237 exec_dmsg("\n[DBG] %s : core[%x,%d] updated parent process children list / cycle %d\n",
    1238 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    1239    
    1240     // block and mark calling thread for deletion
    1241     // only when it is an user thread
    1242     thread_t * this = CURRENT_THREAD;
    1243     if( this->type == THREAD_USER )
    1244     {
    1245         thread_block( this , THREAD_BLOCKED_GLOBAL );
    1246         hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );
    1247     }
    1248 
    12491331    // activate new thread
    12501332        thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
    12511333
     1334    // request old_thread destruction => old_process destruction
     1335    thread_block( old_thread , THREAD_BLOCKED_GLOBAL );
     1336    hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );
     1337
    12521338    hal_fence();
    12531339
    1254 exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s / cycle %d\n",
    1255 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path , (uint32_t)hal_get_cycles() );
    1256 
     1340#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1341cycle = (uint32_t)hal_get_cycles();
     1342if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1343printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n",
     1344__FUNCTION__ , old_thread , new_thread , cycle );
     1345#endif
     1346   
    12571347        return 0;
    12581348
    12591349}  // end process_make_exec()
    12601350
    1261 ///////////////////////////////////////
    1262 void process_make_kill( pid_t      pid,
    1263                         uint32_t   sig_id )
    1264 {
    1265     // this function must be executed by a thread running in owner cluster
    1266     assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
    1267     "must execute in owner cluster" );
    1268 
     1351////////////////////////////////////////////
     1352void process_make_kill( process_t * process,
     1353                        bool_t      is_exit,
     1354                        uint32_t    exit_status )
     1355{
    12691356    thread_t * this = CURRENT_THREAD;
    12701357
    1271 kill_dmsg("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d\n",
    1272 __FUNCTION__, local_cxy, this->core->lid, pid , sig_id );
    1273 
    1274     // get pointer on local target process descriptor
    1275     process_t * process = process_get_local_copy( pid );
    1276 
    1277     // does nothing if process does not exist
    1278     if( process == NULL )
    1279     {
    1280         printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
    1281         __FUNCTION__ , pid );
    1282         return;
    1283     }
    1284 
    1285     // analyse signal type
    1286     switch( sig_id )
    1287     {
    1288         case SIGSTOP:     
    1289         {
    1290             // block all threads in all clusters
    1291             process_sigaction( process , BLOCK_ALL_THREADS );
    1292 
    1293             // remove TXT ownership to target process
    1294             process_txt_reset_ownership( XPTR( local_cxy , process ) );
    1295         }
    1296         break;
    1297         case SIGCONT:     // unblock all threads in all clusters
    1298         {
    1299             process_sigaction( process , UNBLOCK_ALL_THREADS );
    1300         }
    1301         break;
    1302         case SIGKILL:  // block all threads, then delete all threads
    1303         {
    1304             // block all threads in all clusters
    1305             process_sigaction( process , BLOCK_ALL_THREADS );
    1306 
    1307             // remove TXT ownership to target process
    1308             process_txt_reset_ownership( XPTR( local_cxy , process ) );
    1309 
    1310             // delete all threads (but the calling thread)
    1311             process_sigaction( process , DELETE_ALL_THREADS );
    1312 
    1313             // delete the calling thread if required
    1314             if( CURRENT_THREAD->process == process )
    1315             {
    1316                 // set REQ_DELETE flag
    1317                 hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );
    1318 
    1319                 // deschedule
    1320                 sched_yield( "suicide after kill" );
    1321             }
    1322         }
    1323         break;
    1324     }
    1325 
    1326 kill_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d \n",
    1327 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , sig_id );
     1358    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
     1359    "must be executed in process owner cluster\n" );
     1360
     1361    assert( ( this->type == THREAD_RPC ) , __FUNCTION__ ,
     1362    "must be executed by an RPC thread\n" );
     1363
     1364#if CONFIG_DEBUG_PROCESS_MAKE_KILL
     1365uint32_t cycle = (uint32_t)hal_get_cycles();
     1366if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle )
     1367printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     1368__FUNCTION__, this , process->pid , cycle );
     1369#endif
     1370
     1371    // register exit_status in owner process descriptor
     1372    if( is_exit ) process->term_state = exit_status;
     1373
     1374    // atomically update owner process descriptor flags
     1375    if( is_exit ) hal_atomic_or( &process->term_state , PROCESS_FLAG_EXIT );
     1376    else          hal_atomic_or( &process->term_state , PROCESS_FLAG_KILL );
     1377
     1378    // remove TXT ownership from owner process descriptor
     1379    process_txt_reset_ownership( XPTR( local_cxy , process ) );
     1380
     1381    // block all process threads in all clusters
     1382    process_sigaction( process , BLOCK_ALL_THREADS );
     1383
     1384    // mark all process threads in all clusters for delete
     1385    process_sigaction( process , DELETE_ALL_THREADS );
     1386
     1387/* unused if sys_wait deschedules without blocking [AG]
     1388
     1389    // get cluster and pointers on reference parent process
     1390    xptr_t      parent_xp  = process->parent_xp;
     1391    process_t * parent_ptr = GET_PTR( parent_xp );
     1392    cxy_t       parent_cxy = GET_CXY( parent_xp );
     1393
     1394    // get local pointer on parent main thread
     1395    thread_t * main_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[0] ) );
     1396 
     1397    // reset THREAD_BLOCKED_WAIT bit in parent process main thread
     1398    thread_unblock( XPTR( parent_cxy , main_ptr ) , THREAD_BLOCKED_WAIT );
     1399*/
     1400
     1401#if CONFIG_DEBUG_PROCESS_MAKE_KILL
     1402cycle = (uint32_t)hal_get_cycles();
     1403if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle )
     1404printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     1405__FUNCTION__, this, process->pid , cycle );
     1406#endif
    13281407
    13291408}  // end process_make_kill()
    1330 
    1331 /////////////////////////////////////////
    1332 void process_make_exit( pid_t       pid,
    1333                         uint32_t    status )
    1334 {
    1335     // this function must be executed by a thread running in owner cluster
    1336     assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
    1337     "must execute in owner cluster" );
    1338 
    1339     // get pointer on local process descriptor
    1340     process_t * process = process_get_local_copy( pid );
    1341 
    1342     // does nothing if process does not exist
    1343     if( process == NULL )
    1344     {
    1345         printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
    1346         __FUNCTION__ , pid );
    1347         return;
    1348     }
    1349 
    1350     // block all threads in all clusters (but the calling thread)
    1351     process_sigaction( process , BLOCK_ALL_THREADS );
    1352 
    1353     // delete all threads in all clusters (but the calling thread)
    1354     process_sigaction( process , DELETE_ALL_THREADS );
    1355 
    1356     // delete the calling thread
    1357     hal_atomic_or( &CURRENT_THREAD->flags , THREAD_FLAG_REQ_DELETE );
    1358 
    1359     // deschedule
    1360     sched_yield( "suicide after exit" );
    1361 
    1362 }  // end process_make_exit()
    13631409
    13641410///////////////////////////////////////////////
     
    13661412{
    13671413
    1368 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
    1369 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
     1414#if CONFIG_DEBUG_PROCESS_ZERO_CREATE
     1415uint32_t cycle = (uint32_t)hal_get_cycles();
     1416if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1417printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1418#endif
    13701419
    13711420    // initialize PID, REF_XP, PARENT_XP, and STATE
    1372     process->pid       = 0;
    1373     process->ref_xp    = XPTR( local_cxy , process );
    1374     process->parent_xp = XPTR_NULL;
    1375     process->state     = PROCESS_STATE_RUNNING;
     1421    process->pid        = 0;
     1422    process->ref_xp     = XPTR( local_cxy , process );
     1423    process->parent_xp  = XPTR_NULL;
     1424    process->term_state = 0;
    13761425
    13771426    // reset th_tbl[] array as empty
     
    13911440        hal_fence();
    13921441
    1393 process_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
    1394 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
     1442#if CONFIG_DEBUG_PROCESS_ZERO_CREATE
     1443cycle = (uint32_t)hal_get_cycles();
     1444if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1445printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1446#endif
    13951447
    13961448}  // end process_zero_init()
     
    14061458    error_t          error;
    14071459
    1408 process_dmsg("\n[DBG] %s :  core[%x,%d] enters at cycle %d\n",
    1409 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
     1460#if CONFIG_DEBUG_PROCESS_INIT_CREATE
     1461uint32_t cycle = (uint32_t)hal_get_cycles();
     1462if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1463printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1464#endif
    14101465
    14111466    // allocates memory for process descriptor from local cluster
     
    14341489                            XPTR( local_cxy , &process_zero ),     // parent
    14351490                            XPTR( local_cxy , &process_zero ) );   // model
    1436 
    1437 process_dmsg("\n[DBG] %s : core[%x,%d] / initialisation done\n",
    1438 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
    14391491
    14401492    // register "code" and "data" vsegs as well as entry-point
     
    14461498        process_destroy( process );
    14471499        }
    1448 
    1449 process_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
    1450 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );
    14511500
    14521501    // get extended pointers on process_zero children_root, children_lock
     
    14891538    hal_fence();
    14901539
    1491 process_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n",
    1492 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread );
     1540#if CONFIG_DEBUG_PROCESS_INIT_CREATE
     1541cycle = (uint32_t)hal_get_cycles();
     1542if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1543printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
     1544#endif
    14931545
    14941546}  // end process_init_create()
    1495 
    1496 //////////////////////////////////////////
    1497 char * process_state_str( uint32_t state )
    1498 {
    1499     if     ( state == PROCESS_STATE_RUNNING ) return "RUNNING";
    1500     else if( state == PROCESS_STATE_KILLED  ) return "KILLED";
    1501     else if( state == PROCESS_STATE_EXITED  ) return "EXITED";
    1502     else                                      return "undefined";
    1503 }
    15041547
    15051548/////////////////////////////////////////
     
    15421585    // get PID and state
    15431586    pid   = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
    1544     state = hal_remote_lw( XPTR( process_cxy , &process_ptr->state ) );
     1587    state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) );
    15451588
    15461589    // get PPID
     
    15771620    if( owner_xp == process_xp )
    15781621    {
    1579         printk("PID %X | PPID %X | %s\t| %s (FG) | %X | %d | %s\n",
    1580         pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );
     1622        printk("PID %X | PPID %X | STS %X | %s (FG) | %X | %d | %s\n",
     1623        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
    15811624    }
    15821625    else
    15831626    {
    1584         printk("PID %X | PPID %X | %s\t| %s (BG) | %X | %d | %s\n",
    1585         pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );
     1627        printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n",
     1628        pid, ppid, state, txt_name, process_ptr, th_nr, elf_name );
    15861629    }
    15871630}  // end process_display()
     
    16321675    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    16331676
    1634 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n",
    1635 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() );
     1677#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1678uint32_t cycle = (uint32_t)hal_get_cycles();
     1679if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1680printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d  / cycle %d\n",
     1681__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
     1682#endif
    16361683
    16371684    // check process is reference
     
    16571704    remote_spinlock_unlock( lock_xp );
    16581705
    1659 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle\n",
    1660 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() );
     1706#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1707cycle = (uint32_t)hal_get_cycles();
     1708if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1709printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
     1710__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
     1711#endif
    16611712
    16621713} // end process_txt_attach()
     
    16701721    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    16711722
    1672 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n",
    1673 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() );
     1723#if CONFIG_DEBUG_PROCESS_TXT_DETACH
     1724uint32_t cycle = (uint32_t)hal_get_cycles();
     1725if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle )
     1726printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     1727__FUNCTION__, CURRENT_THREAD, process->pid , cycle );
     1728#endif
    16741729
    16751730    // check process is reference
     
    16901745    remote_spinlock_unlock( lock_xp );
    16911746   
    1692 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle %d\n",
    1693 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() );
     1747#if CONFIG_DEBUG_PROCESS_TXT_DETACH
     1748cycle = (uint32_t)hal_get_cycles();
     1749if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle )
     1750printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     1751__FUNCTION__, CURRENT_THREAD, process->pid, cycle );
     1752#endif
    16941753
    16951754} // end process_txt_detach()
     
    17321791    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    17331792    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
    1734     chdev_t   * txt_ptr;
    1735     cxy_t       txt_cxy;
     1793    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
     1794    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
     1795    uint32_t    txt_id;          // TXT_RX channel
    17361796    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    17371797    xptr_t      root_xp;         // extended pointer on root of attached process list
    17381798    xptr_t      iter_xp;         // iterator for xlist
    17391799    xptr_t      current_xp;      // extended pointer on current process
    1740     process_t * current_ptr;
    1741     cxy_t       current_cxy;
    1742     pid_t       ppid;
     1800    process_t * current_ptr;     // local pointer on current process
     1801    cxy_t       current_cxy;     // cluster for current process
     1802    pid_t       ppid;            // parent process identifier for current process
    17431803
    17441804    // get cluster and local pointer on process
     
    17521812    txt_xp  = chdev_from_file( file_xp );
    17531813    txt_cxy = GET_CXY( txt_xp );
    1754     txt_ptr = (chdev_t *)GET_PTR( txt_xp );
    1755 
    1756     // get extended pointer on TXT_RX owner
     1814    txt_ptr = GET_PTR( txt_xp );
     1815
     1816    // get extended pointer on TXT_RX owner and TXT channel
    17571817    owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
     1818    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
    17581819
    17591820    // transfer ownership to KSH if required
    1760     if( owner_xp == process_xp )   
     1821    if( (owner_xp == process_xp) && (txt_id > 0) )   
    17611822    {
    17621823        // get extended pointer on root of list of attached processes
     
    17821843            }
    17831844        }
    1784     }
    1785 
    1786     assert( false , __FUNCTION__ , "KSH process not found" );
    1787 
     1845
     1846        assert( false , __FUNCTION__ , "KSH process not found" );
     1847    }
    17881848}  // end process_txt_reset_ownership()
    17891849
  • trunk/kernel/kern/process.h

    r428 r433  
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016,2017)
     6 *          Alain Greiner (2016,2017,2018)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    6565
    6666/*********************************************************************************************
    67  * This enum defines the process states for ALMOS_MKH.
    68  ********************************************************************************************/
    69 
    70 enum process_states
    71 {
    72     PROCESS_STATE_RUNNING = 0,           /*! process is executing                           */
    73     PROCESS_STATE_STOPPED = 1,           /*! process has been stopped by a signal           */
    74     PROCESS_STATE_KILLED  = 2,           /*! process has been killed by a signal            */
    75     PROCESS_STATE_EXITED  = 3,           /*! process terminated with an exit                */
    76 };
     67 * The termination state is a 32-bit word:
     68 * - the 8 LSB bits contain the user-defined exit status
     69 * - the 24 other bits contain the flags defined below
     70 ********************************************************************************************/
     71
     72#define PROCESS_FLAG_BLOCK 0x100  /*! process received a SIGSTOP signal                     */
     73#define PROCESS_FLAG_KILL  0x200  /*! process terminated by a sys_kill()                    */
     74#define PROCESS_FLAG_EXIT  0x400  /*! process terminated by a sys_exit()                    */
     75#define PROCESS_FLAG_WAIT  0x800  /*! parent process successfully executed a sys_wait()     */
    7776
    7877/*********************************************************************************************
     
    118117 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    119118 *    are defined in all process descriptors copies.
     119 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
    120120 ********************************************************************************************/
    121121
     
    155155    remote_spinlock_t sync_lock;        /*! lock protecting sem,mutex,barrier,condvar lists */
    156156
    157     uint32_t          state;            /*! RUNNING / STOPPED / KILLED / EXITED             */
     157    uint32_t          term_state;       /*! termination status (flags & exit status)        */
    158158
    159159    bool_t            txt_owner;        /*! current TXT owner                               */
     
    168168typedef struct exec_info_s
    169169{
    170     pid_t              pid;            /*! process identifier (both parent and child)       */
    171 
    172170    char               path[CONFIG_VFS_MAX_PATH_LENGTH];   /*!  .elf file path              */
    173171
     
    276274
    277275/*********************************************************************************************
    278  * This function returns a printable string defining the process state.
    279  *********************************************************************************************
    280  * @ state   : RUNNING / BLOCKED / EXITED / KILLED
    281  * @ return a string pointer.
    282  ********************************************************************************************/
    283 char * process_state_str( uint32_t state );
    284 
    285 /*********************************************************************************************
    287277 * This debug function displays on the kernel terminal TXT0 detailed information on a
    287277 * reference process identified by the <process_xp> argument.
     
    324314
    325315/*********************************************************************************************
    326  * This function blocks all threads (but the client thread defined by the <client_xp>
    327  * argument) for a given <process> in a given cluster.
     316 * This function blocks all threads for a given <process> in a given cluster.
     317 * The calling thread cannot be a target thread.
    328318 * It loops on all local threads of the process, set the THREAD_BLOCKED_GLOBAL bit,
    329319 * and request the relevant schedulers to acknowledge the blocking, using IPI if required.
     
    332322 *********************************************************************************************
    333323 * @ process     : pointer on the target process descriptor.
    334  * @ client_xp   : extended pointer on the client thread, that should not be blocked.
    335  ********************************************************************************************/
    336 void process_block_threads( process_t * process,
    337                             xptr_t      client_xp );
     324 ********************************************************************************************/
     325void process_block_threads( process_t * process );
    338326
    339327/*********************************************************************************************
     
    345333
    346334/*********************************************************************************************
    347  * This function delete all threads, (but the client thread defined by the <client_xp>
    348  * argument) for a given <process> in a given cluster.
     335 * This function marks for deletion all threads - but one - for a given <process>
     336 * in a given cluster. The main thread in owner cluster is NOT marked.
     337 * It will be marked for deletion by the parent process sys_wait().
     338 * The calling thread cannot be a target thread.
    349339 * It loops on all local threads of the process, and set the THREAD_FLAG_REQ_DELETE bit.
    350340 * For each marked thread, the following actions will be done by the scheduler at the next
     
    357347 *********************************************************************************************
    358348 * @ process     : pointer on the process descriptor.
    359  * @ client_xp   : extended pointer on the client thread, that should not be deleted.
    360  ********************************************************************************************/
    361 void process_delete_threads( process_t * process,
    362                              xptr_t      client_xp );
     349 ********************************************************************************************/
     350void process_delete_threads( process_t * process );
    363351
    364352/*********************************************************************************************
     
    396384 * associated "child" thread descriptor in the local cluster. This function can involve
    397385 * up to three different clusters :
    398  * - the local (child) cluster can be any cluster defined by the sys_fork function.
     386 * - the child (local) cluster can be any cluster defined by the sys_fork function.
    399387 * - the parent cluster must be the reference cluster for the parent process.
    400388 * - the client cluster containing the thread requesting the fork can be any cluster.
     
    416404
    417405/*********************************************************************************************
    418  * This function implement the "exit" system call, and is called by the sys_exit() function.
    419  * It must be executed by a thread running in the calling process owner cluster.
    420  * It uses twice the multicast RPC_PROCESS_SIGNAL to first block all process threads
    421  * in all clusters, and then delete all threads and process descriptors.
    422  *********************************************************************************************
    423  * @ pid      : process identifier.
    424  * @ status   : exit return value.
    425  ********************************************************************************************/
    426 void process_make_exit( pid_t       pid,
    427                         uint32_t    status );
    428 
    429 /*********************************************************************************************
    430  * This function implement the "kill" system call, and is called by the sys_kill() function.
    431  * It must be executed by a thread running in the target process owner cluster.
    432  * Only the SIGKILL, SIGSTOP, and SIGCONT signals are supported.
    433  * User defined handlers are not supported.
    434  * It uses once or twice the multicast RPC_PROCESS_SIGNAL to block, unblock or delete
    435  * all process threads in all clusters, and then delete process descriptors.
    436  *********************************************************************************************
    437  * @ pid     : process identifier.
    438  * @ sig_id  : signal type.
    439  ********************************************************************************************/
    440 void process_make_kill( pid_t     pid,
    441                         uint32_t  sig_id );
     406 * This function is called by both the sys_kill() and sys_exit() system calls.
     407 * It must be executed by an RPC thread running in the target process owner cluster.
     408 * It calls the process_sigaction() function twice:
     409 * - first, to block all target process threads, in all clusters.
     410 * - second, to delete all target process threads in all clusters.
     411 * Finally, it synchronizes with the parent process sys_wait() function that MUST be called
     412 * by the parent process main thread.
     413 *********************************************************************************************
     414 * @ process      : pointer on process descriptor in owner cluster.
     415 * @ is_exit      : true when called by sys_exit() / false when called by sys_kill().
     416 * @ exit_status  : exit status, when called by sys_exit().
     417 ********************************************************************************************/
     418void process_make_kill( process_t * process,
     419                        bool_t      is_exit,
     420                        uint32_t    exit_status );
    442421
    443422
  • trunk/kernel/kern/rpc.c

    r428 r433  
    4242#include <rpc.h>
    4343
     44
     45/////////////////////////////////////////////////////////////////////////////////////////
     46//        Debug macros for marshalling functions
     47/////////////////////////////////////////////////////////////////////////////////////////
     48
     49#if CONFIG_DEBUG_RPC_MARSHALING
     50
     51#define RPC_DEBUG_ENTER                                                                \
     52uint32_t cycle = (uint32_t)hal_get_cycles();                                           \
     53if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
     54printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n",                     \
     55__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle );
     56
     57#define RPC_DEBUG_EXIT                                                                 \
     58cycle = (uint32_t)hal_get_cycles();                                                    \
     59if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
     60printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n",                      \
     61__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle );
     62
     63#else
     64
     65#define RPC_DEBUG_ENTER
     66
     67#define RPC_DEBUG_EXIT
     68
     69#endif
     70
    4471/////////////////////////////////////////////////////////////////////////////////////////
    4572//      array of function pointers  (must be consistent with enum in rpc.h)
     
    5077    &rpc_pmem_get_pages_server,         // 0
    5178    &rpc_pmem_release_pages_server,     // 1
    52     &rpc_process_make_exec_server,      // 2
     79    &rpc_undefined,                     // 2    unused slot
    5380    &rpc_process_make_fork_server,      // 3
    54     &rpc_process_make_exit_server,      // 4
     81    &rpc_undefined,                     // 4    unused slot
    5582    &rpc_process_make_kill_server,      // 5
    5683    &rpc_thread_user_create_server,     // 6
     
    6895    &rpc_vfs_mapper_load_all_server,    // 17
    6996    &rpc_fatfs_get_cluster_server,      // 18
    70     &rpc_undefined,                     // 19
     97    &rpc_undefined,                     // 19   unused slot
    7198
    7299    &rpc_vmm_get_vseg_server,           // 20
     
    497524
    498525/////////////////////////////////////////////////////////////////////////////////////////
    499 // [2]           Marshaling functions attached to RPC_PROCESS_MAKE_EXEC (blocking)
    500 /////////////////////////////////////////////////////////////////////////////////////////
    501 
    502 /////////////////////////////////////////////////////
    503 void rpc_process_make_exec_client( cxy_t         cxy,
    504                                    exec_info_t * info,     // in
    505                                    error_t     * error )   // out
    506 {
    507 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    508 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    509 CURRENT_THREAD->core->lid , hal_time_stamp() );
    510 
    511     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    512 
    513     // initialise RPC descriptor header
    514     rpc_desc_t  rpc;
    515     rpc.index    = RPC_PROCESS_MAKE_EXEC;
    516     rpc.response = 1;
    517     rpc.blocking = true;
    518 
    519     // set input arguments in RPC descriptor 
    520     rpc.args[0] = (uint64_t)(intptr_t)info;
    521 
    522     // register RPC request in remote RPC fifo (blocking function)
    523     rpc_send( cxy , &rpc );
    524 
    525     // get output arguments from RPC descriptor
    526     *error  = (error_t)rpc.args[1];     
    527 
    528 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    529 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    530 CURRENT_THREAD->core->lid , hal_time_stamp() );
    531 }
    532 
    533 //////////////////////////////////////////////
    534 void rpc_process_make_exec_server( xptr_t xp )
    535 {
    536 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    537 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    538 CURRENT_THREAD->core->lid , hal_time_stamp() );
    539 
    540     exec_info_t * ptr;       // local pointer on remote exec_info structure
    541     exec_info_t   info;      // local copy of exec_info structure
    542     error_t       error;     // local error error status
    543 
    544     // get client cluster identifier and pointer on RPC descriptor
    545     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    546     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
    547 
    548     // get pointer on exec_info structure in client cluster from RPC descriptor
    549     ptr = (exec_info_t*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    550 
    551     // copy exec_info structure from client buffer to server buffer
    552     hal_remote_memcpy( XPTR( client_cxy , ptr ),
    553                        XPTR( local_cxy , &info ),
    554                        sizeof(exec_info_t) );
    555 
    556     // call local kernel function
    557     error = process_make_exec( &info );
    558 
    559     // set output argument into client RPC descriptor
    560     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    561 
    562 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    563 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    564 CURRENT_THREAD->core->lid , hal_time_stamp() );
    565 }
     526// [2]      undefined slot
     527/////////////////////////////////////////////////////////////////////////////////////////
    566528
    567529/////////////////////////////////////////////////////////////////////////////////////////
     
    644606
    645607/////////////////////////////////////////////////////////////////////////////////////////
    646 // [4]      Marshaling functions attached to RPC_PROCESS_MAKE_EXIT (blocking)
     608// [4]      undefined slot
     609/////////////////////////////////////////////////////////////////////////////////////////
     610
     611/////////////////////////////////////////////////////////////////////////////////////////
     612// [5]      Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking)
    647613/////////////////////////////////////////////////////////////////////////////////////////
    648614
    649615///////////////////////////////////////////////////
    650 void rpc_process_make_exit_client( cxy_t       cxy,
    651                                    pid_t       pid,
     616void rpc_process_make_kill_client( cxy_t       cxy,
     617                                   process_t * process,
     618                                   bool_t      is_exit,
    652619                                   uint32_t    status )
    653620{
     
    656623CURRENT_THREAD->core->lid , hal_time_stamp() );
    657624
    658     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    659 
    660     // initialise RPC descriptor header
    661     rpc_desc_t  rpc;
    662     rpc.index    = RPC_PROCESS_MAKE_EXIT;
     625    // initialise RPC descriptor header
     626    rpc_desc_t  rpc;
     627    rpc.index    = RPC_PROCESS_MAKE_KILL;
    663628    rpc.response = 1;
    664629    rpc.blocking = true;
    665630
    666631    // set input arguments in RPC descriptor 
    667     rpc.args[0] = (uint64_t)pid;
    668     rpc.args[1] = (uint64_t)status;
     632    rpc.args[0] = (uint64_t)(intptr_t)process;
     633    rpc.args[1] = (uint64_t)is_exit;
     634    rpc.args[2] = (uint64_t)status;
    669635
    670636    // register RPC request in remote RPC fifo (blocking function)
     
    677643
    678644//////////////////////////////////////////////
    679 void rpc_process_make_exit_server( xptr_t xp )
     645void rpc_process_make_kill_server( xptr_t xp )
    680646{
    681647rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     
    683649CURRENT_THREAD->core->lid , hal_time_stamp() );
    684650
    685     pid_t     pid;
    686     uint32_t  status; 
     651    process_t * process;
     652    bool_t      is_exit;
     653    uint32_t    status;
    687654
    688655    // get client cluster identifier and pointer on RPC descriptor
     
    691658
    692659    // get arguments from RPC descriptor
    693     pid    = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    694     status = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     660    process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     661    is_exit = (bool_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     662    status  = (uint32_t)             hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
    695663
    696664    // call local kernel function
    697     process_make_exit( pid , status );
     665    process_make_kill( process , is_exit , status );
    698666
    699667rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    703671
    704672/////////////////////////////////////////////////////////////////////////////////////////
    705 // [5]      Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking)
    706 /////////////////////////////////////////////////////////////////////////////////////////
    707 
    708 ///////////////////////////////////////////////////
    709 void rpc_process_make_kill_client( cxy_t       cxy,
    710                                    pid_t       pid,
    711                                    uint32_t    sig_id )
    712 {
    713 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    714 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    715 CURRENT_THREAD->core->lid , hal_time_stamp() );
    716 
    717     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    718 
    719     // initialise RPC descriptor header
    720     rpc_desc_t  rpc;
    721     rpc.index    = RPC_PROCESS_MAKE_KILL;
    722     rpc.response = 1;
    723     rpc.blocking = true;
    724 
    725     // set input arguments in RPC descriptor 
    726     rpc.args[0] = (uint64_t)pid;
    727     rpc.args[1] = (uint64_t)sig_id;
    728 
    729     // register RPC request in remote RPC fifo (blocking function)
    730     rpc_send( cxy , &rpc );
    731 
    732 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    733 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    734 CURRENT_THREAD->core->lid , hal_time_stamp() );
    735 
    736 
    737 //////////////////////////////////////////////
    738 void rpc_process_make_kill_server( xptr_t xp )
    739 {
    740 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    741 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    742 CURRENT_THREAD->core->lid , hal_time_stamp() );
    743 
    744     pid_t       pid;
    745     uint32_t    sig_id;
    746 
    747     // get client cluster identifier and pointer on RPC descriptor
    748     cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    749     rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
    750 
    751     // get arguments from RPC descriptor
    752     pid    = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    753     sig_id = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
    754 
    755     // call local kernel function
    756     process_make_exit( pid , sig_id );
    757 
    758 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    759 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    760 CURRENT_THREAD->core->lid , hal_time_stamp() );
    761 }
    762 
    763 /////////////////////////////////////////////////////////////////////////////////////////
    764 // [6]           Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking)               
     673// [6]           Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 
    765674/////////////////////////////////////////////////////////////////////////////////////////
    766675
     
    1036945
    1037946    // call relevant kernel function
    1038     if      (action == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
    1039     else if (action == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
    1040     else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process             );
     947    if      (action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
     948    else if (action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
     949    else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
    1041950
    1042951    // decrement the responses counter in RPC descriptor,
  • trunk/kernel/kern/rpc.h

    r428 r433  
    6262    RPC_PMEM_GET_PAGES         = 0,
    6363    RPC_PMEM_RELEASE_PAGES     = 1,
    64     RPC_PROCESS_MAKE_EXEC      = 2,     
     64    RPC_UNDEFINED_2            = 2,     
    6565    RPC_PROCESS_MAKE_FORK      = 3,
    66     RPC_PROCESS_MAKE_EXIT      = 4,
     66    RPC_UNDEFINED_4            = 4,
    6767    RPC_PROCESS_MAKE_KILL      = 5,
    6868    RPC_THREAD_USER_CREATE     = 6,
     
    8080    RPC_VFS_MAPPER_LOAD_ALL    = 17,
    8181    RPC_FATFS_GET_CLUSTER      = 18,
     82    RPC_UNDEFINED_19           = 19,
    8283
    8384    RPC_VMM_GET_VSEG           = 20,
     
    210211
    211212/***********************************************************************************
    212  * [2] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing
    213  * process descriptor in a remote server cluster. This server cluster must be
    214  * the owner cluster for the existing process. The new process descriptor is
    215  * initialized from informations found in the <exec_info> structure.
    216  * A new main thread descriptor is created in the server cluster.
    217  * All copies of the old process descriptor and all old threads are destroyed.
    218  ***********************************************************************************
    219  * @ cxy     : server cluster identifier.
    220  * @ process : [in]  local pointer on the exec_info structure in client cluster.
    221  * @ error   : [out] error status (0 if success).
    222  **********************************************************************************/
    223 void rpc_process_make_exec_client( cxy_t                cxy,
    224                                    struct exec_info_s * info,
    225                                    error_t            * error );
    226 
    227 void rpc_process_make_exec_server( xptr_t xp );
     213 * [2] undefined slot
     214 **********************************************************************************/
    228215
    229216/***********************************************************************************
     
    251238
    252239/***********************************************************************************
    253  * [4] The RPC_PROCESS_MAKE_EXIT can be called by any thread to request the owner
    254  * cluster to execute the process_make_exit() function for the target process.
    255  ***********************************************************************************
    256  * @ cxy      : owner cluster identifier.
    257  * @ pid      : target process identifier.
    258  * @ status   : calling process exit status.
    259  **********************************************************************************/
    260 void rpc_process_make_exit_client( cxy_t              cxy,
    261                                    pid_t              pid,
    262                                    uint32_t           status );
    263 
    264 void rpc_process_make_exit_server( xptr_t xp );
     240 * [4] undefined slot
     241 **********************************************************************************/
    265242
    266243/***********************************************************************************
     
    269246 ***********************************************************************************
    270247 * @ cxy      : owner cluster identifier.
    271  * @ pid      : target process identifier.
    272  * @ seg_id   : signal type (only SIGKILL / SIGSTOP / SIGCONT are supported).
     248 * @ process  : pointer on process in owner cluster.
     249 * @ is_exit  : true if called by sys_exit() / false if called by sys_kill()
     250 * @ status   : exit status (only when called by sys_exit()
    273251 **********************************************************************************/
    274252void rpc_process_make_kill_client( cxy_t              cxy,
    275                                    pid_t              pid,
    276                                    uint32_t           seg_id );
     253                                   struct process_s * process,
     254                                   bool_t             is_exit,
     255                                   uint32_t           status );
    277256
    278257void rpc_process_make_kill_server( xptr_t xp );
     
    517496
    518497/***********************************************************************************
     498 * [19] undefined slot
     499 **********************************************************************************/
     500
     501/***********************************************************************************
    519502 * [20] The RPC_VMM_GET_VSEG returns an extended pointer
    520503 * on the vseg containing a given virtual address in a given process.
  • trunk/kernel/kern/scheduler.c

    r428 r433  
    178178
    179179///////////////////////////////////////////
    180 void sched_handle_requests( core_t * core )
     180void sched_handle_signals( core_t * core )
    181181{
    182182    list_entry_t * iter;
     
    231231            thread_destroy( thread );
    232232
    233 sched_dmsg("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n",
    234 __FUNCTION__ , CURRENT_THREAD , thread , (uint32_t)hal_get_cycles() );
    235 
     233#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     234uint32_t cycle = (uint32_t)hal_get_cycles();
     235if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     236printk("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n",
     237__FUNCTION__ , CURRENT_THREAD , thread , cycle );
     238#endif
    236239            // destroy process descriptor if no more threads
    237240            if( process->th_nr == 0 )
     
    240243                process_destroy( process );
    241244
    242 sched_dmsg("\n[DBG] %s : thread %x deleted process %x / cycle %d\n",
    243 __FUNCTION__ , CURRENT_THREAD , process , (uint32_t)hal_get_cycles() );
     245#if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     246cycle = (uint32_t)hal_get_cycles();
     247if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     248printk("\n[DBG] %s : thread %x deleted process %x / cycle %d\n",
     249__FUNCTION__ , CURRENT_THREAD , process , cycle );
     250#endif
    244251
    245252            }
     
    251258    spinlock_unlock( &sched->lock );
    252259
    253 } // end sched_handle_requests()
     260} // end sched_handle_signals()
    254261
    255262////////////////////////////////
     
    261268    scheduler_t * sched   = &core->scheduler;
    262269 
    263 #if( CONFIG_SCHED_DEBUG & 0x1 )
    264 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid );
     270#if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
     271if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
     272sched_display( core->lid );
    265273#endif
    266274
     
    291299    {
    292300
    293 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
     301#if CONFIG_DEBUG_SCHED_YIELD
     302uint32_t cycle = (uint32_t)hal_get_cycles();
     303if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     304printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    294305"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
    295306__FUNCTION__, local_cxy, core->lid, cause,
    296307current, thread_type_str(current->type), current->process->pid, current->trdid,
    297 next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
    298 (uint32_t)hal_get_cycles() );
     308next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
     309#endif
    299310
    300311        // update scheduler
     
    316327    {
    317328
    318 #if( CONFIG_SCHED_DEBUG & 0x1 )
    319 if( hal_time_stamp() > CONFIG_SCHED_DEBUG )
    320 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    321 "      thread %x (%s) (%x,%x) continue / cycle %d\n",
     329#if( CONFIG_DEBUG_SCHED_YIELD & 0x1 )
     330uint32_t cycle = (uint32_t)hal_get_cycles();
     331if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     332printk("\n[DBG] %s : core[%x,%d] / cause = %s / thread %x (%s) (%x,%x) continue / cycle %d\n",
    322333__FUNCTION__, local_cxy, core->lid, cause,
    323 current, thread_type_str(current->type), current->process->pid, current->trdid,
    324 (uint32_t)hal_get_cycles() );
     334current, thread_type_str(current->type), current->process->pid, current->trdid, cycle );
    325335#endif
    326336
     
    328338
    329339    // handle pending requests for all threads executing on this core.
    330     sched_handle_requests( core );
     340    sched_handle_signals( core );
    331341
    332342    // exit critical section / restore SR from next thread context
  • trunk/kernel/kern/scheduler.h

    r428 r433  
    9191 * @ core    : local pointer on the core descriptor.
    9292 ********************************************************************************************/
    93 void sched_handle_requests( struct core_s * core );
     93void sched_handle_signals( struct core_s * core );
    9494
    9595/*********************************************************************************************
  • trunk/kernel/kern/thread.c

    r428 r433  
    227227    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
    228228
    229 thread_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n",
    230 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid );
     229#if CONFIG_DEBUG_THREAD_USER_CREATE
     230uint32_t cycle = (uint32_t)hal_get_cycles();
     231if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     232printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
     233__FUNCTION__, CURRENT_THREAD, pid , cycle );
     234#endif
    231235
    232236    // get process descriptor local copy
    233237    process = process_get_local_copy( pid );
    234 
    235238    if( process == NULL )
    236239    {
     
    326329    dqdt_local_update_threads( 1 );
    327330
    328 thread_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / trdid = %x / core = %d\n",
    329 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, thread->trdid, core_lid );
     331#if CONFIG_DEBUG_THREAD_USER_CREATE
     332cycle = (uint32_t)hal_get_cycles();
     333if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     334printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
     335__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
     336#endif
    330337
    331338    *new_thread = thread;
     
    359366    vseg_t       * vseg;             // child thread STACK vseg
    360367
    361 thread_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n",
    362 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
     368#if CONFIG_DEBUG_THREAD_USER_FORK
     369uint32_t cycle = (uint32_t)hal_get_cycles();
     370if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     371printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
     372__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
     373#endif
    363374
    364375    // select a target core in local cluster
     
    474485        }
    475486
    476         // increment page descriptor fork_nr for the referenced page if mapped
     487        // increment pending forks counter for the page if mapped
    477488        if( mapped )
    478489        {
     
    480491            cxy_t    page_cxy = GET_CXY( page_xp );
    481492            page_t * page_ptr = (page_t *)GET_PTR( page_xp );
    482             hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 );
    483 
    484 thread_dmsg("\n[DBG] %s : core[%x,%d] copied PTE to child GPT : vpn %x\n",
    485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     493            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
     494
     495#if (CONFIG_DEBUG_THREAD_USER_FORK & 1)
     496cycle = (uint32_t)hal_get_cycles();
     497if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     498printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
     499__FUNCTION__, CURRENT_THREAD, vpn );
     500#endif
    486501
    487502        }
    488503    }
    489504
    490     // set COW flag for STAK vseg in parent thread GPT
    491     hal_gpt_flip_cow( true,                               // set cow
    492                       parent_gpt_xp,
    493                       vpn_base,
    494                       vpn_size );
     505    // set COW flag for all mapped entries of STAK vseg in parent thread GPT
     506    hal_gpt_set_cow( parent_gpt_xp,
     507                     vpn_base,
     508                     vpn_size );
    495509 
    496510        // update DQDT for child thread
    497511    dqdt_local_update_threads( 1 );
    498512
    499 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n",
    500 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, child_ptr->trdid, child_process->pid );
     513#if CONFIG_DEBUG_THREAD_USER_FORK
     514cycle = (uint32_t)hal_get_cycles();
     515if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     516printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
     517__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
     518#endif
    501519
    502520        return 0;
     
    514532        thread_t     * thread;       // pointer on new thread descriptor
    515533
    516 thread_dmsg("\n[DBG] %s : core[%x,%d] enters / type %s / cycle %d\n",
    517 __FUNCTION__ , local_cxy , core_lid , thread_type_str( type ) , hal_time_stamp() );
    518 
    519534    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
    520535    __FUNCTION__ , "illegal thread type" );
     
    522537    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
    523538            __FUNCTION__ , "illegal core_lid" );
     539
     540#if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     541uint32_t cycle = (uint32_t)hal_get_cycles();
     542if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     543printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
      544__FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle );
     545#endif
    524546
    525547    // allocate memory for new thread descriptor
     
    549571    dqdt_local_update_threads( 1 );
    550572
    551 thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n",
    552 __FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() );
     573#if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     574cycle = (uint32_t)hal_get_cycles();
     575if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     576printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
     577__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
     578#endif
    553579
    554580    *new_thread = thread;
     
    589615void thread_destroy( thread_t * thread )
    590616{
    591         uint32_t     tm_start;
    592         uint32_t     tm_end;
    593617    reg_t        save_sr;
    594618
     
    596620    core_t     * core       = thread->core;
    597621
    598     thread_dmsg("\n[DBG] %s : enters for thread %x in process %x / type = %s\n",
    599                 __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );
     622#if CONFIG_DEBUG_THREAD_DESTROY
     623uint32_t cycle = (uint32_t)hal_get_cycles();
     624if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     625printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
     626__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     627#endif
    600628
    601629    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
     
    604632
    605633    assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" );
    606 
    607         tm_start = hal_get_cycles();
    608634
    609635    // update intrumentation values
     
    635661    thread_release( thread );
    636662
    637         tm_end = hal_get_cycles();
    638 
    639         thread_dmsg("\n[DBG] %s : exit for thread %x in process %x / duration = %d\n",
    640                        __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start );
     663#if CONFIG_DEBUG_THREAD_DESTROY
     664cycle = (uint32_t)hal_get_cycles();
     665if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     666printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
     667__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     668#endif
    641669
    642670}   // end thread_destroy()
     
    779807    hal_fence();
    780808
     809#if CONFIG_DEBUG_THREAD_BLOCK
     810uint32_t cycle = (uint32_t)hal_get_cycles();
     811if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     812printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / state %x / cycle %d\n",
     813__FUNCTION__ , CURRENT_THREAD , thread , cause , thread->blocked , cycle );
     814#endif
     815
    781816} // end thread_block()
    782817
    783 /////////////////////////////////////////
    784 uint32_t thread_unblock( xptr_t   thread,
     818////////////////////////////////////////////
     819uint32_t thread_unblock( xptr_t   thread_xp,
    785820                         uint32_t cause )
    786821{
    787822    // get thread cluster and local pointer
    788     cxy_t      cxy = GET_CXY( thread );
    789     thread_t * ptr = (thread_t *)GET_PTR( thread );
     823    cxy_t      cxy = GET_CXY( thread_xp );
     824    thread_t * ptr = GET_PTR( thread_xp );
    790825
    791826    // reset blocking cause
     
    793828    hal_fence();
    794829
     830#if CONFIG_DEBUG_THREAD_BLOCK
     831uint32_t cycle = (uint32_t)hal_get_cycles();
     832if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     833printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / state %x / cycle %d\n",
     834__FUNCTION__ , CURRENT_THREAD , ptr , cause , ptr->blocked , cycle );
     835#endif
     836
    795837    // return a non zero value if the cause bit is modified
    796838    return( previous & cause );
     
    805847    thread_t * killer = CURRENT_THREAD;
    806848
    807 thread_dmsg("\n[DBG] %s : killer thread %x enter for target thread %x\n",
    808 __FUNCTION__, local_cxy, killer->trdid , target->trdid );
     849#if CONFIG_DEBUG_THREAD_KILL
      850uint32_t cycle  = (uint32_t)hal_get_cycles();
     851if( CONFIG_DEBUG_THREAD_KILL < cycle )
     852printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
     853__FUNCTION__, killer, target, cycle );
     854#endif
    809855
    810856    // set the global blocked bit in target thread descriptor.
     
    835881        hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
    836882
    837 thread_dmsg("\n[DBG] %s : killer thread %x exit for target thread %x\n",
    838 __FUNCTION__, local_cxy, killer->trdid , target->trdid );
     883#if CONFIG_DEBUG_THREAD_KILL
      884cycle  = (uint32_t)hal_get_cycles();
     885if( CONFIG_DEBUG_THREAD_KILL < cycle )
     886printk("\n[DBG] %s : thread %x exit for target thread %x / cycle %d\n",
     887__FUNCTION__, killer, target, cycle );
     888#endif
    839889
    840890}  // end thread_kill()
     
    851901        {
    852902
    853 idle_dmsg("\n[DBG] %s : core[%x][%d] goes to sleep at cycle %d\n",
    854 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
     903#if CONFIG_DEBUG_THREAD_IDLE
      904uint32_t cycle  = (uint32_t)hal_get_cycles();
     905thread_t * this = CURRENT_THREAD;
     906if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     907printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
     908__FUNCTION__, this, local_cxy, this->core->lid, cycle );
     909#endif
    855910
    856911            hal_core_sleep();
    857912
    858 idle_dmsg("\n[DBG] %s : core[%x][%d] wake up at cycle %d\n",
    859 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
     913#if CONFIG_DEBUG_THREAD_IDLE
      914cycle  = (uint32_t)hal_get_cycles();
     915if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     916printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
     917__FUNCTION__, this, local_cxy, this->core->lid, cycle );
     918#endif
    860919
    861920        }
  • trunk/kernel/libk/elf.c

    r407 r433  
    201201                vfs_file_count_up( file_xp );
    202202
    203                 elf_dmsg("\n[DBG] %s : found %s vseg / base = %x / size = %x\n"
    204                  "       file_size = %x / file_offset = %x / mapper_xp = %l\n",
    205             __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min ,
    206         vseg->file_size , vseg->file_offset , vseg->mapper_xp );
     203#if CONFIG_DEBUG_ELF_LOAD
     204uint32_t cycle = (uint32_t)hal_get_cycles();
     205if( CONFIG_DEBUG_ELF_LOAD < cycle )
     206printk("\n[DBG] %s : found %s vseg / base %x / size %x\n"
     207"  file_size %x / file_offset %x / mapper_xp %l / cycle %d\n",
     208__FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min ,
     209vseg->file_size , vseg->file_offset , vseg->mapper_xp );
     210#endif
     211
    207212        }
    208213
     
    223228        error_t      error;
    224229
    225     elf_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s>\n",
    226     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname );
     230#if CONFIG_DEBUG_ELF_LOAD
     231uint32_t cycle = (uint32_t)hal_get_cycles();
     232if( CONFIG_DEBUG_ELF_LOAD < cycle )
     233printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
     234__FUNCTION__, CURRENT_THREAD, pathname, cycle );
     235#endif
    227236
    228237    // avoid GCC warning
     
    243252        }
    244253
    245     elf_dmsg("\n[DBG] %s : open file <%s>\n", __FUNCTION__ , pathname );
     254#if (CONFIG_DEBUG_ELF_LOAD & 1)
     255if( CONFIG_DEBUG_ELF_LOAD < cycle )
     256printk("\n[DBG] %s : open file <%s>\n", __FUNCTION__, pathname );
     257#endif
    246258
    247259        // load header in local buffer
     
    256268        }
    257269
    258         elf_dmsg("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname );
     270#if (CONFIG_DEBUG_ELF_LOAD & 1)
     271if( CONFIG_DEBUG_ELF_LOAD < cycle )
     272printk("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname );
     273#endif
    259274
    260275        if( header.e_phnum == 0 )
     
    293308        }
    294309
    295         elf_dmsg("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname );
     310#if (CONFIG_DEBUG_ELF_LOAD & 1)
     311if( CONFIG_DEBUG_ELF_LOAD < cycle )
     312printk("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname );
     313#endif
    296314
    297315        // load seg descriptors array to local buffer
     
    310328        }
    311329
    312         elf_dmsg("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname );
     330#if (CONFIG_DEBUG_ELF_LOAD & 1)
     331if( CONFIG_DEBUG_ELF_LOAD < cycle )
     332printk("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname );
     333#endif
    313334
    314335        // register loadable segments in process VMM
     
    335356        kmem_free(&req);
    336357
    337     elf_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / entry_point = %x\n",
    338     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname , header.e_entry );
     358#if CONFIG_DEBUG_ELF_LOAD
     359cycle = (uint32_t)hal_get_cycles();
     360if( CONFIG_DEBUG_ELF_LOAD < cycle )
     361printk("\n[DBG] %s : thread %x exit for <%s> / entry_point %x / cycle %d\n",
     362__FUNCTION__, CURRENT_THREAD, pathname, header.e_entry, cycle );
     363#endif
    339364
    340365        return 0;
  • trunk/kernel/libk/remote_rwlock.c

    r423 r433  
    4141    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
    4242
    43 #if CONFIG_LOCKS_DEBUG
     43#if CONFIG_DEBUG_LOCKS
    4444    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
    4545    xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
     
    8686    thread_ptr->remote_locks++;
    8787
    88 #if CONFIG_LOCKS_DEBUG
     88#if CONFIG_DEBUG_LOCKS
    8989    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    9090                     XPTR( lock_cxy ,  &lock_ptr->list ) );
     
    126126        thread_ptr->remote_locks--;
    127127
    128 #if CONFIG_LOCKS_DEBUG
     128#if CONFIG_DEBUG_LOCKS
    129129    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    130130#endif
     
    176176    }
    177177
    178 #if CONFIG_LOCKS_DEBUG
     178#if CONFIG_DEBUG_LOCKS
    179179    hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    180180                    XPTR( local_cxy , thread_ptr ) );
  • trunk/kernel/libk/remote_spinlock.c

    r423 r433  
    3939        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
    4040
    41 #if CONFIG_LOCKS_DEBUG
     41#if CONFIG_DEBUG_LOCKS
    4242        hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    4343        xlist_entry_init( XPTR( cxy , &ptr->list ) );
     
    7676                thread_ptr->remote_locks++;
    7777
    78 #if CONFIG_LOCKS_DEBUG
     78#if CONFIG_DEBUG_LOCKS
    7979                hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    8080                                XPTR( local_cxy , thread_ptr) );
     
    121121        thread_ptr->remote_locks++;
    122122
    123 #if CONFIG_LOCKS_DEBUG
     123#if CONFIG_DEBUG_LOCKS
    124124        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    125125                        XPTR( local_cxy , thread_ptr) );
     
    144144        thread_t          * thread_ptr = CURRENT_THREAD;
    145145
    146 #if CONFIG_LOCKS_DEBUG
     146#if CONFIG_DEBUG_LOCKS
    147147        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    148148        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     
    197197        thread_ptr->remote_locks++;
    198198
    199 #if CONFIG_LOCKS_DEBUG
     199#if CONFIG_DEBUG_LOCKS
    200200        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    201201                        XPTR( local_cxy , thread_ptr) );
     
    218218        thread_t          * thread_ptr = CURRENT_THREAD;
    219219
    220 #if CONFIG_LOCKS_DEBUG
     220#if CONFIG_DEBUG_LOCKS
    221221        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    222222        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
  • trunk/kernel/libk/remote_spinlock.h

    r409 r433  
    7272 * This function releases a remote busy_waiting spinlock.
    7373 * It restores the CPU SR state.
     74 * It decrements the calling thread locks count.
    7475 *******************************************************************************************
    7576 * @ lock_xp    : extended pointer on remote spinlock.
     
    100101/***************************************************************************************
    101102 * This function releases a remote spinlock.
     103 * It decrements the calling thread locks count.
    102104 ***************************************************************************************
    103105 * @ lock_xp    : extended pointer on the remote spinlock
  • trunk/kernel/libk/rwlock.c

    r409 r433  
    3838    lock->count   = 0;
    3939
    40 #if CONFIG_LOCKS_DEBUG
     40#if CONFIG_DEBUG_LOCKS
    4141        lock->owner   = NULL;
    4242    list_entry_init( &lock->list );
     
    7070    this->local_locks++;
    7171
    72 #if CONFIG_LOCKS_DEBUG
     72#if CONFIG_DEBUG_LOCKS
    7373    list_add_first( &this->locks_root , &lock->list );
    7474#endif
     
    9898    this->local_locks--;
    9999
    100 #if CONFIG_LOCKS_DEBUG
     100#if CONFIG_DEBUG_LOCKS
    101101    list_unlink( &lock->list );
    102102#endif
     
    138138    this->local_locks++;
    139139
    140 #if CONFIG_LOCKS_DEBUG
     140#if CONFIG_DEBUG_LOCKS
    141141    lock->owner = this;
    142142    list_add_first( &this->locks_root , &lock->list );
     
    157157        hal_disable_irq( &mode );
    158158 
    159 #if CONFIG_LOCKS_DEBUG
     159#if CONFIG_DEBUG_LOCKS
    160160    lock->owner = NULL;
    161161    list_unlink( &lock->list );
  • trunk/kernel/libk/spinlock.c

    r409 r433  
    3838    lock->taken = 0;
    3939
    40 #if CONFIG_LOCKS_DEBUG
     40#if CONFIG_DEBUG_LOCKS
    4141    lock->owner = NULL;
    4242    list_entry_init( &lock->list );
     
    7171    this->local_locks++;
    7272
    73 #if CONFIG_LOCKS_DEBUG
     73#if CONFIG_DEBUG_LOCKS
    7474    lock->owner = this;
    7575    list_add_first( &this->locks_root , &lock->list );
     
    8686    thread_t * this = CURRENT_THREAD;;
    8787
    88 #if CONFIG_LOCKS_DEBUG
     88#if CONFIG_DEBUG_LOCKS
    8989    lock->owner = NULL;
    9090    list_unlink( &lock->list );
     
    132132    this->local_locks++;
    133133
    134 #if CONFIG_LOCKS_DEBUG
     134#if CONFIG_DEBUG_LOCKS
    135135    lock->owner = this;
    136136    list_add_first( &this->locks_root , &lock->list );
     
    162162        this->local_locks++;
    163163
    164 #if CONFIG_LOCKS_DEBUG
     164#if CONFIG_DEBUG_LOCKS
    165165        lock->owner = this;
    166166        list_add_first( &this->locks_root , &lock->list );
     
    177177    thread_t * this = CURRENT_THREAD;
    178178
    179 #if CONFIG_LOCKS_DEBUG
     179#if CONFIG_DEBUG_LOCKS
    180180    lock->owner = NULL;
    181181    list_unlink( &lock->list );
  • trunk/kernel/mm/kcm.c

    r407 r433  
    4747                             kcm_page_t * kcm_page )
    4848{
    49         kcm_dmsg("\n[DBG] %s : enters for %s / page %x / count = %d / active = %d\n",
    50                  __FUNCTION__ , kmem_type_str( kcm->type ) ,
    51                  (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     49
     50#if CONFIG_DEBUG_KCM_ALLOC
     51uint32_t cycle = (uint32_t)hal_get_cycles();
     52if( CONFIG_DEBUG_KCM_ALLOC < cycle )
     53printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
     54__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
     55(intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     56#endif
    5257
    5358        assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );
     
    8085                     + (index * kcm->block_size) );
    8186
    82         kcm_dmsg("\n[DBG] %s : allocated one block  %s / ptr = %p / page = %x / count = %d\n",
    83                  __FUNCTION__ , kmem_type_str( kcm->type ) , ptr ,
    84                  (intptr_t)kcm_page , kcm_page->count );
     87#if CONFIG_DEBUG_KCM_ALLOC
     88cycle = (uint32_t)hal_get_cycles();
     89if( CONFIG_DEBUG_KCM_ALLOC < cycle )
     90printk("\n[DBG] %s : thread %x exit / type  %s / ptr %p / page %x / count %d\n",
     91__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
     92(intptr_t)kcm_page , kcm_page->count );
     93#endif
    8594
    8695        return ptr;
     
    300309                kcm->active_pages_nr ++;
    301310                kcm_page->active = 1;
    302 
    303                 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
    304                          __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
    305                          (intptr_t)kcm_page , kcm_page->count );
    306 
    307311        }
    308312        else                                    // get first page from active list
     
    310314                // get page pointer from active list
    311315                kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    312 
    313                 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
    314                          __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
    315                          (intptr_t)kcm_page , kcm_page->count );
    316316        }
    317317
  • trunk/kernel/mm/kmem.c

    r429 r433  
    198198        if( type == KMEM_PAGE )                        // PPM allocator
    199199        {
     200
     201#if CONFIG_DEBUG_KMEM_ALLOC
     202if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
     203printk("\n[DBG] in %s : thread %x enter for %d page(s)\n",
     204__FUNCTION__ , CURRENT_THREAD , 1<<size );
     205#endif
     206
    200207                // allocate the number of requested pages
    201208                ptr = (void *)ppm_alloc_pages( size );
     
    213220                          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
    214221                          (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
     222
     223#if CONFIG_DEBUG_KMEM_ALLOC
     224if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
     225printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n",
     226__FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) );
     227#endif
     228
    215229        }
    216230        else if( type == KMEM_GENERIC )                // KHM allocator
  • trunk/kernel/mm/page.c

    r408 r433  
    4747        page->index    = 0;
    4848        page->refcount = 0;
    49         page->fork_nr  = 0;
     49        page->fork  = 0;
    5050
    5151        spinlock_init( &page->lock );
  • trunk/kernel/mm/page.h

    r408 r433  
    5656 * This structure defines a physical page descriptor.
    5757 * Size is 64 bytes for a 32 bits core...
     58 * TODO : the list of waiting threads seems to be unused [AG]
     59 * TODO : the spinlock use has to be clarified [AG]
    5860 ************************************************************************************/
    5961
     
    6769    xlist_entry_t     wait_root;      /*! root of list of waiting threads      (16) */
    6870        uint32_t          refcount;       /*! reference counter                    (4)  */
    69         uint32_t          fork_nr;        /*! number of pending forks              (4)  */
    70         spinlock_t        lock;           /*! only used to set the PG_LOCKED flag  (16) */
     71        uint32_t          forks;          /*! number of pending forks              (4)  */
     72        spinlock_t        lock;           /*! To Be Defined [AG]                   (16) */
    7173}
    7274page_t;
  • trunk/kernel/mm/ppm.c

    r407 r433  
    193193        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    194194        ppm->free_pages_nr[current_order] ++;
    195 }
     195
     196}  // end ppm_free_pages_nolock()
    196197
    197198////////////////////////////////////////////
     
    201202        page_t   * remaining_block;
    202203        uint32_t   current_size;
     204 
     205#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     206uint32_t cycle = (uint32_t)hal_get_cycles();
     207if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     208printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
     209__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     210#endif
     211
     212#if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
     213if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     214ppm_print();
     215#endif
    203216
    204217        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     
    208221
    209222        page_t * block = NULL; 
    210 
    211         ppm_dmsg("\n[DBG] %s : enters / order = %d\n",
    212                  __FUNCTION__ , order );
    213223
    214224        // take lock protecting free lists
     
    231241                spinlock_unlock( &ppm->free_lock );
    232242
     243#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     244cycle = (uint32_t)hal_get_cycles();
     245if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     246printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
     247__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     248#endif
     249
    233250                return NULL;
    234251        }
     
    260277        spinlock_unlock( &ppm->free_lock );
    261278
    262         ppm_dmsg("\n[DBG] %s : base = %x / order = %d\n",
    263                  __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
     279#if CONFIG_DEBUG_PPM_ALLOC_PAGES
     280cycle = (uint32_t)hal_get_cycles();
     281if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     282printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
     283__FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     284#endif
    264285
    265286        return block;
    266 }
     287
     288}  // end ppm_alloc_pages()
    267289
    268290
     
    272294        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    273295
     296#if CONFIG_DEBUG_PPM_FREE_PAGES
     297uint32_t cycle = (uint32_t)hal_get_cycles();
     298if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     299printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
     300__FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
     301#endif
     302
     303#if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
     304if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     305ppm_print();
     306#endif
     307
    274308        // get lock protecting free_pages[] array
    275309        spinlock_lock( &ppm->free_lock );
     
    279313        // release lock protecting free_pages[] array
    280314        spinlock_unlock( &ppm->free_lock );
     315
     316#if CONFIG_DEBUG_PPM_FREE_PAGES
     317cycle = (uint32_t)hal_get_cycles();
     318if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     319printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
     320__FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
     321#endif
     322
    281323}
    282324
    283 ////////////////////////////
    284 void ppm_print( ppm_t * ppm,
    285                 char  * string )
     325////////////////
     326void ppm_print()
    286327{
    287328        uint32_t       order;
     
    289330        page_t       * page;
    290331
     332    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     333
    291334        // get lock protecting free lists
    292335        spinlock_lock( &ppm->free_lock );
    293336
    294         printk("\n***  PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
    295     local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );
     337        printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
    296338
    297339        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    298340        {
    299                 printk("- order = %d / free_pages = %d  [",
     341                printk("- order = %d / free_pages = %d\t: ",
    300342                       order , ppm->free_pages_nr[order] );
    301343
     
    303345                {
    304346                        page = LIST_ELEMENT( iter , page_t , list );
    305                         printk("%d," , page - ppm->pages_tbl );
     347                        printk("%x," , page - ppm->pages_tbl );
    306348                }
    307349
    308                 printk("]\n", NULL );
     350                printk("\n");
    309351        }
    310352
  • trunk/kernel/mm/ppm.h

    r409 r433  
    5252 * from the "kernel_heap" section.
    5353 * This low-level allocator implements the buddy algorithm: an allocated block is
    54  * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
     54 * an integer number n of 4 small pages, and n (called order) is a power of 2.
    5555 ****************************************************************************************/
    5656
     
    163163
    164164/*****************************************************************************************
    165  * This function prints the PPM allocator status.
    166  *****************************************************************************************
    167  * @ ppm      : pointer on PPM allocator.
    168  * @ string   : define context of display.
     165 * This function prints the PPM allocator status in the calling thread cluster.
    169166 ****************************************************************************************/
    170 void ppm_print( ppm_t * ppm,
    171                 char  * string );
     167void ppm_print();
    172168
    173169/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r429 r433  
    6363    intptr_t  size;
    6464
    65 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    66 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     65#if CONFIG_DEBUG_VMM_INIT
     66uint32_t cycle = (uint32_t)hal_get_cycles();
     67if( CONFIG_DEBUG_VMM_INIT < cycle )
     68printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     69__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     70#endif
    6771
    6872    // get pointer on VMM
     
    179183    hal_fence();
    180184
    181 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / entry_point = %x\n",
    182 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    183 process->pid , process->vmm.entry_point );
     185#if CONFIG_DEBUG_VMM_INIT
     186cycle = (uint32_t)hal_get_cycles();
     187if( CONFIG_DEBUG_VMM_INIT < cycle )
     188printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
     189__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     190#endif
    184191
    185192    return 0;
     
    211218    {
    212219        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    213         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     220        vseg    = GET_PTR( vseg_xp );
    214221
    215222        printk(" - %s : base = %X / size = %X / npages = %d\n",
     
    239246}  // vmm_display()
    240247
    241 /////////////////////i////////////////////
    242 void vmm_update_pte( process_t * process,
    243                      vpn_t       vpn,
    244                      uint32_t    attr,
    245                      ppn_t       ppn )
     248/////////////////////i//////////////////////////
     249void vmm_global_update_pte( process_t * process,
     250                            vpn_t       vpn,
     251                            uint32_t    attr,
     252                            ppn_t       ppn )
    246253{
    247254
     
    258265    cxy_t           owner_cxy;
    259266    lpid_t          owner_lpid;
     267
     268#if CONFIG_DEBUG_VMM_UPDATE_PTE
     269uint32_t cycle = (uint32_t)hal_get_cycles();
     270if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     271printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
     272__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     273#endif
     274
     275    // check cluster is reference
     276    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__,
     277    "not called in reference cluster\n");
    260278
    261279    // get extended pointer on root of process copies xlist in owner cluster
     
    271289        // get cluster and local pointer on remote process
    272290        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    273         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     291        remote_process_ptr = GET_PTR( remote_process_xp );
    274292        remote_process_cxy = GET_CXY( remote_process_xp );
     293
     294#if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
     295if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     296printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     297__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     298#endif
    275299
    276300        // get extended pointer on remote gpt
    277301        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
    278302
    279         hal_gpt_update_pte( remote_gpt_xp,
    280                             vpn,
    281                             attr,
    282                             ppn );
     303        // update remote GPT
     304        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    283305    } 
    284 }  // end vmm_update_pte()
     306
     307#if CONFIG_DEBUG_VMM_UPDATE_PTE
     308cycle = (uint32_t)hal_get_cycles();
     309if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     310printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
     311__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     312#endif
     313
     314}  // end vmm_global_update_pte()
    285315
    286316///////////////////////////////////////
     
    308338    lpid_t          owner_lpid;
    309339
    310 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    311 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     340#if CONFIG_DEBUG_VMM_SET_COW
     341uint32_t cycle = (uint32_t)hal_get_cycles();
     342if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     343printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     344__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     345#endif
    312346
    313347    // check cluster is reference
     
    333367        // get cluster and local pointer on remote process
    334368        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    335         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     369        remote_process_ptr = GET_PTR( remote_process_xp );
    336370        remote_process_cxy = GET_CXY( remote_process_xp );
    337371
    338 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n",
    339 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy );
     372#if (CONFIG_DEBUG_VMM_SET_COW &0x1)
     373if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     374printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     375__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     376#endif
    340377
    341378        // get extended pointer on remote gpt
     
    347384            // get pointer on vseg
    348385            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
    349             vseg     = (vseg_t *)GET_PTR( vseg_xp );
     386            vseg     = GET_PTR( vseg_xp );
    350387
    351388            assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__,
     
    357394            vpn_t    vpn_size = vseg->vpn_size;
    358395
    359 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
    360 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size );
    361 
    362             // set COW flag on the remote GPT depending on vseg type
     396#if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
     397if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     398printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
     399__FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
     400#endif
     401            // only DATA, ANON and REMOTE vsegs
    363402            if( (type == VSEG_TYPE_DATA)  ||
    364403                (type == VSEG_TYPE_ANON)  ||
    365404                (type == VSEG_TYPE_REMOTE) )
    366405            {
    367                 hal_gpt_flip_cow( true,             // set_cow
    368                                   remote_gpt_xp,
    369                                   vpn_base,
    370                                   vpn_size );
    371             }
    372         }    // en loop on vsegs
     406                vpn_t      vpn;
     407                uint32_t   attr;
     408                ppn_t      ppn;
     409                xptr_t     page_xp;
     410                cxy_t      page_cxy;
     411                page_t   * page_ptr;
     412                xptr_t     forks_xp;
     413
     414                // update flags in remote GPT
     415                hal_gpt_set_cow( remote_gpt_xp,
     416                                 vpn_base,
     417                                 vpn_size );
     418
     419                // atomically increment pending forks counter in physical pages,
     420                // for all vseg pages that are mapped in reference cluster
     421                if( remote_process_cxy == local_cxy )
     422                {
     423                    // the reference GPT is the local GPT
     424                    gpt_t * gpt = GET_PTR( remote_gpt_xp );
     425
     426                    // scan all pages in vseg
     427                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
     428                    {
     429                        // get page attributes and PPN from reference GPT
     430                        hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
     431
     432                        // atomically update pending forks counter if page is mapped
     433                        if( attr & GPT_MAPPED )
     434                        {
     435                            page_xp  = ppm_ppn2page( ppn );
     436                            page_cxy = GET_CXY( page_xp );
     437                            page_ptr = GET_PTR( page_xp );
     438                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
     439                            hal_remote_atomic_add( forks_xp , 1 );
     440                        }
     441                    }   // end loop on vpn
     442                }   // end if local
     443            }   // end if vseg type
     444        }   // end loop on vsegs
    373445    }   // end loop on process copies
    374446 
    375 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    376 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     447#if CONFIG_DEBUG_VMM_SET_COW
     448cycle = (uint32_t)hal_get_cycles();
     449if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     450printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     451__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     452#endif
    377453
    378454}  // end vmm_set-cow()
     
    404480    ppn_t       ppn;
    405481
    406 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    407 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     482#if CONFIG_DEBUG_VMM_FORK_COPY
     483uint32_t cycle = (uint32_t)hal_get_cycles();
     484if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     485printk("\n[DBG] %s : thread %x enter / cycle %d\n",
     486__FUNCTION__ , CURRENT_THREAD, cycle );
     487#endif
    408488
    409489    // get parent process cluster and local pointer
    410490    parent_cxy     = GET_CXY( parent_process_xp );
    411     parent_process = (process_t *)GET_PTR( parent_process_xp );
     491    parent_process = GET_PTR( parent_process_xp );
    412492
    413493    // get local pointers on parent and child VMM
     
    445525        // get local and extended pointers on current parent vseg
    446526        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    447         parent_vseg    = (vseg_t *)GET_PTR( parent_vseg_xp );
     527        parent_vseg    = GET_PTR( parent_vseg_xp );
    448528
    449529        // get vseg type
    450530        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
    451531       
    452 
    453 vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n",
    454 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    455 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     532#if CONFIG_DEBUG_VMM_FORK_COPY
     533cycle = (uint32_t)hal_get_cycles();
     534if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     535printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
     536__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
     537hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     538#endif
    456539
    457540        // all parent vsegs - but STACK - must be copied in child VSL
     
    473556            vseg_attach( child_vmm , child_vseg );
    474557
    475 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n",
    476 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    477 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     558#if CONFIG_DEBUG_VMM_FORK_COPY
     559cycle = (uint32_t)hal_get_cycles();
     560if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     561printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
     562__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
     563hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     564#endif
    478565
    479566            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
     
    502589                    }
    503590
    504                     // increment page descriptor fork_nr for the referenced page if mapped
     591                    // increment pending forks counter in page if mapped
    505592                    if( mapped )
    506593                    {
    507594                        page_xp = ppm_ppn2page( ppn );
    508595                        page_cxy = GET_CXY( page_xp );
    509                         page_ptr = (page_t *)GET_PTR( page_xp );
    510                         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 );
    511 
    512 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n",
    513 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     596                        page_ptr = GET_PTR( page_xp );
     597                        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
     598
     599#if CONFIG_DEBUG_VMM_FORK_COPY
     600cycle = (uint32_t)hal_get_cycles();
     601if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     602printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
     603__FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     604#endif
    514605
    515606                    }
     
    558649    hal_fence();
    559650
     651#if CONFIG_DEBUG_VMM_FORK_COPY
     652cycle = (uint32_t)hal_get_cycles();
     653if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     654printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
     655__FUNCTION__ , CURRENT_THREAD , cycle );
     656#endif
     657
    560658    return 0;
    561659
     
    568666        vseg_t * vseg;
    569667
    570 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    571 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
    572 
    573     // get pointer on VMM
     668#if CONFIG_DEBUG_VMM_DESTROY
     669uint32_t cycle = (uint32_t)hal_get_cycles();
     670if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     671printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     672__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     673#endif
     674
     675    // get pointer on local VMM
    574676    vmm_t  * vmm = &process->vmm;
    575677
     
    586688        // get pointer on first vseg in VSL
    587689                vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
    588         vseg = (vseg_t *)GET_PTR( vseg_xp );
    589 
    590         // unmap and release all pages
     690        vseg    = GET_PTR( vseg_xp );
     691
     692        // unmap and release physical pages if required
    591693        vmm_unmap_vseg( process , vseg );
    592694
     
    598700        }
    599701
    600     // release lock
     702    // release lock protecting VSL
    601703        remote_rwlock_wr_unlock( lock_xp );
    602704
     
    616718    hal_gpt_destroy( &vmm->gpt );
    617719
    618 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
    619 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     720#if CONFIG_DEBUG_VMM_DESTROY
     721cycle = (uint32_t)hal_get_cycles();
     722if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     723printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     724__FUNCTION__ , CURRENT_THREAD , cycle );
     725#endif
    620726
    621727}  // end vmm_destroy()
     
    637743        {
    638744                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    639         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     745        vseg    = GET_PTR( vseg_xp );
    640746
    641747                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
     
    766872        error_t      error;
    767873
    768 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters / process %x / base %x / size %x / %s / cxy = %x\n",
    769 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    770 process->pid , base , size , vseg_type_str(type) , cxy );
     874#if CONFIG_DEBUG_VMM_CREATE_VSEG
     875uint32_t cycle = (uint32_t)hal_get_cycles();
     876if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     877printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
     878__FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
     879#endif
    771880
    772881    // get pointer on VMM
     
    854963        remote_rwlock_wr_unlock( lock_xp );
    855964
    856 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n",
    857 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    858 process->pid , base , size , vseg_type_str(type) );
     965#if CONFIG_DEBUG_VMM_CREATE_VSEG
     966cycle = (uint32_t)hal_get_cycles();
     967if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     968printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
     969__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     970#endif
    859971
    860972        return vseg;
     
    9851097    cxy_t       page_cxy;   // page descriptor cluster
    9861098    page_t    * page_ptr;   // page descriptor pointer
    987 
    988 vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n",
    989 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ,
    990 vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() );
    991 
    992     // get pointer on process GPT
     1099    xptr_t      forks_xp;   // extended pointer on pending forks counter
     1100    uint32_t    count;      // actual number of pending forks
     1101
     1102#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1103uint32_t cycle = (uint32_t)hal_get_cycles();
     1104if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1105printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
     1106__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1107#endif
     1108
     1109    // get pointer on local GPT
    9931110    gpt_t     * gpt = &process->vmm.gpt;
    9941111
     
    10071124            "an user vseg must use small pages" );
    10081125
    1009             // unmap GPT entry
     1126            // unmap GPT entry in all GPT copies
    10101127            hal_gpt_reset_pte( gpt , vpn );
    10111128
    1012             // release memory if not identity mapped
    1013             if( (vseg->flags & VSEG_IDENT)  == 0 )
     1129            // handle pending forks counter if
     1130            // 1) not identity mapped
     1131            // 2) running in reference cluster
     1132            if( ((vseg->flags & VSEG_IDENT)  == 0) &&
     1133                (GET_CXY( process->ref_xp ) == local_cxy) )
    10141134            {
    1015                 // get extended pointer on page descriptor
     1135                // get extended pointer on physical page descriptor
    10161136                page_xp  = ppm_ppn2page( ppn );
    10171137                page_cxy = GET_CXY( page_xp );
    1018                 page_ptr = (page_t *)GET_PTR( page_xp );
    1019 
    1020                 // release physical page to relevant cluster
    1021                 if( page_cxy == local_cxy )                   // local cluster
     1138                page_ptr = GET_PTR( page_xp );
     1139
     1140                // FIXME lock the physical page
     1141
     1142                // get extended pointer on pending forks counter
     1143                forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1144
     1145                // get pending forks counter
     1146                count = hal_remote_lw( forks_xp );
     1147               
     1148                if( count )  // decrement pending forks counter
    10221149                {
    1023                     req.type = KMEM_PAGE;
    1024                     req.ptr  = page_ptr;
    1025                     kmem_free( &req );
     1150                    hal_remote_atomic_add( forks_xp , -1 );
     1151                } 
     1152                else         // release physical page to relevant cluster
     1153                {
     1154                    if( page_cxy == local_cxy )   // local cluster
     1155                    {
     1156                        req.type = KMEM_PAGE;
     1157                        req.ptr  = page_ptr;
     1158                        kmem_free( &req );
     1159                    }
     1160                    else                          // remote cluster
     1161                    {
     1162                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
     1163                    }
    10261164                }
    1027                 else                                          // remote cluster
    1028                 {
    1029                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
    1030                 }
     1165
     1166                // FIXME unlock the physical page
    10311167            }
    10321168        }
    10331169    }
     1170
     1171#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1172cycle = (uint32_t)hal_get_cycles();
     1173if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1174printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
     1175__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1176#endif
     1177
    10341178}  // end vmm_unmap_vseg()
    10351179
     
    10611205    {
    10621206        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    1063         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     1207        vseg    = GET_PTR( vseg_xp );
    10641208        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    10651209        {
     
    11851329        // get cluster and local pointer on reference process
    11861330        cxy_t       ref_cxy = GET_CXY( ref_xp );
    1187         process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     1331        process_t * ref_ptr = GET_PTR( ref_xp );
    11881332
    11891333        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
     
    12241368                                 vpn_t    vpn )
    12251369{
     1370
     1371#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1372if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1373printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
     1374__FUNCTION__ , CURRENT_THREAD, vpn );
     1375#endif
     1376
    12261377    // compute target cluster
    12271378    page_t     * page_ptr;
     
    12621413    }
    12631414
     1415#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1416if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1417printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
     1418__FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     1419#endif
     1420
    12641421    if( page_ptr == NULL ) return XPTR_NULL;
    12651422    else                   return XPTR( page_cxy , page_ptr );
     
    12811438    index     = vpn - vseg->vpn_base;
    12821439
    1283 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n",
    1284 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index );
     1440#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1441if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1442printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
     1443__FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
     1444#endif
    12851445
    12861446    // FILE type : get the physical page from the file mapper
     
    12951455        // get mapper cluster and local pointer
    12961456        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1297         mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1457        mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    12981458
    12991459        // get page descriptor from mapper
     
    13161476    else
    13171477    {
    1318         // allocate physical page
     1478        // allocate one physical page
    13191479        page_xp = vmm_page_allocate( vseg , vpn );
    13201480
     
    13221482
    13231483        // initialise missing page from .elf file mapper for DATA and CODE types
    1324         // => the mapper_xp field is an extended pointer on the .elf file mapper
     1484        // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
    13251485        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
    13261486        {
     
    13331493            // get mapper cluster and local pointer
    13341494            cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1335             mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1495            mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    13361496
    13371497            // compute missing page offset in vseg
     
    13411501            uint32_t elf_offset = vseg->file_offset + offset;
    13421502
    1343 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n",
    1344 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset );
     1503#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1504if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1505printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
     1506__FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
     1507#endif
    13451508
    13461509            // compute extended pointer on page base
     
    13521515            if( file_size < offset )                 // missing page fully in  BSS
    13531516            {
    1354 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in BSS\n",
    1355 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1517
     1518#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1519if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1520printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
     1521__FUNCTION__, CURRENT_THREAD, vpn );
     1522#endif
    13561523
    13571524                if( GET_CXY( page_xp ) == local_cxy )
     
    13671534            {
    13681535
    1369 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in mapper\n",
    1370 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1536#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1537if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1538printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
     1539__FUNCTION__, CURRENT_THREAD, vpn );
     1540#endif
    13711541
    13721542                if( mapper_cxy == local_cxy )
     
    13961566            {
    13971567
    1398 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n"
    1399          "      %d bytes from mapper / %d bytes from BSS\n",
    1400 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn,
     1568#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1569if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1570printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
     1571"      %d bytes from mapper / %d bytes from BSS\n",
     1572__FUNCTION__, CURRENT_THREAD, vpn,
    14011573file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
    1402 
     1574#endif
    14031575                // initialize mapper part
    14041576                if( mapper_cxy == local_cxy )
     
    14411613    *ppn = ppm_page2ppn( page_xp );
    14421614
    1443 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
    1444 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn );
     1615#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1616if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1617printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
     1618__FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
     1619#endif
    14451620
    14461621    return 0;
     
    14551630                     ppn_t     * ppn )
    14561631{
    1457     vseg_t  * vseg;       // pointer on vseg containing VPN
     1632    vseg_t  * vseg;       // vseg containing VPN
    14581633    ppn_t     old_ppn;    // current PTE_PPN
    14591634    uint32_t  old_attr;   // current PTE_ATTR
     
    14661641    "not called in the reference cluster\n" );
    14671642
    1468 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n",
    1469 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow );
     1643#if CONFIG_DEBUG_VMM_GET_PTE
     1644uint32_t cycle = (uint32_t)hal_get_cycles();
     1645if( CONFIG_DEBUG_VMM_GET_PTE > cycle )
     1646printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
     1647__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
     1648#endif
    14701649
    14711650    // get VMM pointer
    14721651    vmm_t * vmm = &process->vmm;
    14731652
    1474     // get vseg pointer from ref VSL
     1653    // get vseg pointer from reference VSL
    14751654    error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
    14761655
     
    14821661    }
    14831662
    1484 vmm_dmsg("\n[DBG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n",
    1485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    1486 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size );
     1663#if CONFIG_DEBUG_VMM_GET_PTE
     1664cycle = (uint32_t)hal_get_cycles();
     1665if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1666printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
     1667__FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
     1668#endif
    14871669
    14881670    // access GPT to get current PTE attributes and PPN
     
    14931675    // clusters containing a copy, and return the new_ppn and new_attr
    14941676
    1495     if( cow )               ////////////// copy_on_write request ///////////
     1677    if( cow )  /////////////////////////// copy_on_write request //////////////////////
    14961678    {
    14971679        assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    14981680        "PTE must be mapped for a copy-on-write exception\n" );
    14991681
    1500 excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n",
    1501 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
    1502 
    1503         // get extended pointer, cluster and local pointer on page descriptor
     1682#if CONFIG_DEBUG_VMM_GET_PTE
     1683cycle = (uint32_t)hal_get_cycles();
     1684if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1685printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
     1686__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1687#endif
     1688
     1689        // get extended pointer, cluster and local pointer on physical page descriptor
    15041690        xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    15051691        cxy_t    page_cxy = GET_CXY( page_xp );
    1506         page_t * page_ptr = (page_t *)GET_PTR( page_xp );
     1692        page_t * page_ptr = GET_PTR( page_xp );
    15071693
    15081694        // get number of pending forks in page descriptor
    1509         uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr ) );
    1510 
    1511         if( count )        // pending fork => allocate a new page, copy it, reset COW
     1695        uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
     1696
     1697        if( forks )        // pending fork => allocate a new page, copy old to new
    15121698        {
    15131699            // allocate a new physical page
     
    15391725
    15401726        // update GPT[vpn] for all GPT copies
    1541         // to maintain coherence of copies
    1542         vmm_update_pte( process,
    1543                         vpn,
    1544                         new_attr,
    1545                         new_ppn );
    1546 
    1547         // decrement fork_nr in page descriptor
    1548         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 );
    1549     }
    1550     else                         /////////////// page_fault request ///////////
     1727        vmm_global_update_pte( process, vpn, new_attr, new_ppn );
     1728
     1729        // decrement pending forks counter in page descriptor
     1730        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
     1731    }
     1732    else  ////////////////////////////////// page_fault request ////////////////////////
    15511733    { 
    15521734        if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
    15531735        {
    15541736
    1555 excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n",
    1556 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     1737#if CONFIG_DEBUG_VMM_GET_PTE
     1738cycle = (uint32_t)hal_get_cycles();
     1739if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1740printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
     1741__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1742#endif
    15571743
    15581744            // allocate new_ppn, depending on vseg type
     
    15921778    }
    15931779
    1594 excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n",
    1595 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr );
    1596 
    1597     // retur success
     1780#if CONFIG_DEBUG_VMM_GET_PTE
     1781cycle = (uint32_t)hal_get_cycles();
     1782if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1783printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
     1784__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
     1785#endif
     1786
     1787    // return success
    15981788    *ppn  = new_ppn;
    15991789    *attr = new_attr;
     
    16121802    // get reference process cluster and local pointer
    16131803    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1614     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1804    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16151805
    16161806    // get missing PTE attributes and PPN from reference cluster
     
    16511841                        vpn_t       vpn )
    16521842{
    1653     uint32_t         attr;          // missing page attributes
    1654     ppn_t            ppn;           // missing page PPN
     1843    uint32_t         attr;          // page attributes
     1844    ppn_t            ppn;           // page PPN
    16551845    error_t          error;
    16561846
     1847   
    16571848    // get reference process cluster and local pointer
    16581849    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1659     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1850    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16601851
    16611852    // get new PTE attributes and PPN from reference cluster
     
    17221913    {
    17231914        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1724         process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1915        process_t * ref_ptr = GET_PTR( process->ref_xp );
    17251916        rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error );
    17261917    }
  • trunk/kernel/mm/vmm.h

    r429 r433  
    9999 *    a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
    100100 *    An exemple is the vmm_fork_copy() function.
    101  * 2. In most custers, the VSL and GPT are only partial copies of the reference VSL and GPT
     101 * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
    102102 *    structures, stored in the reference cluster.
    103103 ********************************************************************************************/
     
    155155
    156156/*********************************************************************************************
    157  * This function is called by the process_fork_create() function. It partially copies
     157 * This function is called by the process_make_fork() function. It partially copies
    158158 * the content of a remote parent process VMM to the local child process VMM:
    159159 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
     
    176176
    177177/*********************************************************************************************
    178  * This function is called by the process_make_fork() function to handle the fork syscall.
     178 * This function is called by the process_make_fork() function executing the fork syscall.
    179179 * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP,
    180180 * and REMOTE vsegs of a process identified by the <process> argument.
    181181 * It must be called by a thread running in the reference cluster, that contains the complete
    182  * list of vsegs. Use the rpc_vmm_set_cow_client() when the calling thread client is remote.
     182 * VSL and GPT (use the rpc_vmm_set_cow_client() when the calling thread client is remote).
    183183 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
    184184 * using the list of copies stored in the owner process, and using remote_write accesses to
    185  * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
     185 * update the remote GPTs. It atomically increments the pending_fork counter in all involved
     186 * physical page descriptors. It cannot fail, as only mapped entries in GPTs are updated.
    186187 *********************************************************************************************
    187188 * @ process   : local pointer on local reference process descriptor.
     
    190191
    191192/*********************************************************************************************
    192  * This function is called by the vmm_get_pte() function in case of COW exception.
    193  * It modifies both the PPN an the attributes for a GPT entry identified by the <process>
    194  * and <vpn> arguments.
     193 * This global function modifies a GPT entry identified  by the <process> and <vpn>
     194 * arguments in all clusters containing a process copy.
     195 * It must be called by a thread running in the reference cluster.
    195196 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
    196197 * using the list of copies stored in the owner process, and using remote_write accesses to
     
    202203 * @ ppn       : PTE / physical page index.
    203204 ********************************************************************************************/
    204 void vmm_update_pte( struct process_s * process,
    205                      vpn_t              vpn,
    206                      uint32_t           attr,
    207                      ppn_t              ppn );
    208 
    209 /*********************************************************************************************
    210  * This function scan the list of vsegs registered in the VSL of the process
    211  * identified by the <process> argument, and for each vseg:
    212  * - it unmap from the GPT and releases all mapped pages in vseg.
    213  * - it removes the vseg from the process VSL.
    214  * - It releases the memory allocated to the vseg descriptor.
     205void vmm_global_update_pte( struct process_s * process,
     206                            vpn_t              vpn,
     207                            uint32_t           attr,
     208                            ppn_t              ppn );
     209
     210/*********************************************************************************************
     211 * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
     212 * <process> and <vseg> arguments. It can be used for any type of vseg.
     213 * If this function is executed in the reference cluster, it handles, for each referenced
     214 * physical page, the pending forks counter:
     215 * - if counter is non-zero, it decrements it.
     216 * - if counter is zero, it releases the physical page to local kmem allocator.
     217 *********************************************************************************************
     218 * @ process  : pointer on process descriptor.
     219 * @ vseg     : pointer on the vseg to be unmapped.
     220 ********************************************************************************************/
     221void vmm_unmap_vseg( struct process_s * process,
     222                     vseg_t           * vseg );
     223
     224/*********************************************************************************************
     225 * This function deletes, in the local cluster, all vsegs registered in the VSL
     226 * of the process identified by the <process> argument. For each vseg:
     227 * - it unmaps all vseg PTEs from the GPT (releasing the physical pages when required).
     228 * - it removes the vseg from the local VSL.
     229 * - it releases the memory allocated to the local vseg descriptors.
    215230 * Finally, it releases the memory allocated to the GPT itself.
    216231 *********************************************************************************************
     
    291306
    292307/*********************************************************************************************
    293  * This function unmaps all mapped PTEs of a given vseg, from the generic page table
    294  * associated to a given process descriptor, and releases the physical memory allocated
    295  * to all mapped GPT entries.  It can be used for any type of vseg.
    296  *********************************************************************************************
    297  * @ process  : pointer on process descriptor.
    298  * @ vseg     : pointer on the vseg to be unmapped.
    299  ********************************************************************************************/
    300 void vmm_unmap_vseg( struct process_s * process,
    301                      vseg_t           * vseg );
    302 
    303 /*********************************************************************************************
    304308 * This function removes a given region (defined by a base address and a size) from
    305309 * the VMM of a given process descriptor. This can modify the number of vsegs:
     
    340344/*********************************************************************************************
    341345 * This function is called by the generic exception handler when a page-fault event
    342  * has been detected in a given cluster.
     346 * has been detected for a given process in a given cluster.
    343347 * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
    344348 * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
     
    355359/*********************************************************************************************
    356360 * This function is called by the generic exception handler when a copy-on-write event
    357  * has been detected in a given cluster.
    358  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
    359  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
    360  *   to the reference cluster to get the missing PTE attributes and PPN,
    361  *   and update the local page table.
     361 * has been detected for a given process in a given cluster.
     362 * It takes the lock protecting the physical page, and tests the pending forks counter.
     363 * If there is no pending fork:
     364 * - it resets the COW flag and sets the WRITE flag in the reference GPT entry, and in all
     365 *   the GPT copies.
     366
     367 * If there is a pending fork:
     368 * - It gets the involved vseg pointer.
     369 * - It allocates a new physical page from the cluster defined by the vseg type.
     370 * - It copies the old physical page content to the new physical page.
     371 * - It decrements the pending_fork counter in the old physical page descriptor.
     372
    362373 *********************************************************************************************
    363374 * @ process   : pointer on process descriptor.
     
    369380
    370381/*********************************************************************************************
    371  * This function is called when a new PTE (GPT entry) is required because a "page-fault",
    372  * or "copy-on_write" event has been detected for a given <vpn> in a given <process>.
    373  * The <cow> argument defines the type of event to be handled.
     382 * This function handles both the "page-fault" and "copy-on-write" events for a given <vpn>
     383 * in a given <process>.  The <cow> argument defines the type of event to be handled.
    374384 * This function must be called by a thread running in reference cluster, and the vseg
    375  * containing the searched VPN should be registered in the reference VMM.
    376  * - for an actual page-fault, it allocates the missing physical page from the target cluster
    377  *   defined by the vseg type, initialize it, and update the reference page table.
     385 * containing the searched VPN must be registered in the reference VMM.
     386 * - for a page-fault, it allocates the missing physical page from the target cluster
     387 *   defined by the vseg type, initializes it, and updates the reference GPT, but not
     388 *   the copies GPT, that will be updated on demand.
    378389 * - for a copy-on-write, it allocates a new physical page from the target cluster,
    379  *   initialise it from the old physical page, and update the reference page table.
    380  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page if the
    381  * target cluster is not the reference cluster.
     390 *   initialise it from the old physical page, and updates the reference GPT and all
     391 *   the GPT copies, for coherence.
     392 * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
     393 * the target cluster is not the reference cluster.
    382394 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
    383395 *********************************************************************************************
     
    400412 * (Physical Page Number) associated to a missing page defined by the <vpn> argument.
    401413 * - For the FILE type, it returns directly the physical page from the file mapper.
    402  * - For the CODE and DATA types, it allocates a new phsical page from the cluster defined
     414 * - For the CODE and DATA types, it allocates a new physical page from the cluster defined
    403415 *   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
    404416 *   and initialize this page from the .elf file mapper.
  • trunk/kernel/syscalls/sys_display.c

    r421 r433  
    2525#include <hal_uspace.h>
    2626#include <errno.h>
     27#include <vmm.h>
    2728#include <cluster.h>
    2829#include <thread.h>
    2930#include <process.h>
     31#include <string.h>
    3032
    3133
     
    3537                 reg_t  arg1 )
    3638{
     39    // get thread, process and core
     40    thread_t  * this    = CURRENT_THREAD;
     41    process_t * process = this->process;
     42    core_t    * core    = this->core;
     43
     44#if CONFIG_DEBUG_SYS_DISPLAY
     45uint64_t    tm_start;
     46uint64_t    tm_end;
     47tm_start = hal_get_cycles();
     48if( CONFIG_DEBUG_SYS_DISPLAY < tm_start )
      49printk("\n[DBG] %s : thread %x enter / process %x / cycle = %d\n",
     50__FUNCTION__, this, process->pid, (uint32_t)tm_start );
     51#endif
     52
    3753    if( type == DISPLAY_STRING )
    3854    {
    3955        paddr_t   paddr;
    4056        char      kbuf[256];
     57        uint32_t  length;
    4158
    4259        char    * string = (char *)arg0;
    4360 
    4461        // check string in user space
    45         if( vmm_v2p_translate( false , string , &paddr ) ) return -1;
     62        if( vmm_v2p_translate( false , string , &paddr ) )
     63        {
     64            printk("\n[ERROR] in %s : string buffer %x unmapped\n",
     65            __FUNCTION__ , string );
     66            return -1;
     67        }
    4668
    4769        // ckeck string length
    48         if( hal_strlen_from_uspace( string ) >= 256 ) return -1;
     70        length = hal_strlen_from_uspace( string );
     71        if( length >= 256 )
     72        {
     73            printk("\n[ERROR] in %s : string length %d too large\n",
     74            __FUNCTION__ , length );
     75            return -1;
     76        }
    4977
    5078        // copy string in kernel space
    5179        hal_strcpy_from_uspace( kbuf , string , 256 );
    52 
    53         // get thread, process and core
    54         thread_t  * this    = CURRENT_THREAD;
    55         process_t * process = this->process;
    56         core_t    * core    = this->core;
    5780
    5881        // print message on TXT0 kernel terminal
     
    6083        this->trdid , process->pid , local_cxy, core->lid ,
    6184        (uint32_t)hal_get_cycles() , kbuf );
    62 
    63             return 0;
    6485    }
    6586    else if( type == DISPLAY_VMM )
     
    7091        xptr_t process_xp = cluster_get_reference_process_from_pid( pid );
    7192
    72             if( process_xp == XPTR_NULL ) return -1;
     93            if( process_xp == XPTR_NULL )
     94        {
     95            printk("\n[ERROR] in %s : undefined PID %x\n",
     96            __FUNCTION__ , pid );
     97            return -1;
     98        }
    7399
    74100        // get cluster and local pointer on process
     
    85111            rpc_vmm_display_client( process_cxy , process_ptr , true );
    86112        }
    87 
    88             return 0;
    89113    }
    90114    else if( type == DISPLAY_SCHED )
     
    94118
    95119        // check cluster argument
    96             if( cluster_is_undefined( cxy ) ) return -1;
     120            if( cluster_is_undefined( cxy ) )
     121        {
     122            printk("\n[ERROR] in %s : undefined cluster identifier %x\n",
     123            __FUNCTION__ , cxy );
     124            return -1;
     125        }
    97126
    98127        // check core argument
    99         if( lid >= LOCAL_CLUSTER->cores_nr ) return -1;
     128        if( lid >= LOCAL_CLUSTER->cores_nr )
     129        {
     130            printk("\n[ERROR] in %s : undefined local index %d\n",
     131            __FUNCTION__ , lid );
     132            return -1;
     133        }
    100134
    101         // call kernel function
    102135        if( cxy == local_cxy )
    103136        {
     
    108141            rpc_sched_display_client( cxy , lid );
    109142        }
    110 
    111             return 0;
    112143    }
    113144    else if( type == DISPLAY_PROCESS )
     
    116147
    117148        // check cluster argument
    118             if( cluster_is_undefined( cxy ) ) return -1;
     149            if( cluster_is_undefined( cxy ) )
     150        {
     151            printk("\n[ERROR] in %s : undefined cluster identifier %x\n",
     152            __FUNCTION__ , cxy );
     153            return -1;
     154        }
    119155
    120         // call kernel function
    121156        cluster_processes_display( cxy );
    122 
    123         return 0;
    124157    }
    125158    else if( type == DISPLAY_VFS )
     
    128161        process_t * process = CURRENT_THREAD->process;
    129162        vfs_display( process->vfs_root_xp );
    130 
    131         return 0;
    132163    }
    133164    else if( type == DISPLAY_CHDEV )
    134165    {
    135         // call kernel function
    136166        chdev_dir_display();
     167    }
     168    else
     169    {
     170        printk("\n[ERROR] in %s : undefined display type %x\n",
     171        __FUNCTION__ , type );
     172        return -1;
     173    }
    137174
    138         return 0;
    139     }
    140     else return -1;
     175#if CONFIG_DEBUG_SYS_DISPLAY
     176tm_end = hal_get_cycles();
     177if( CONFIG_DEBUG_SYS_DISPLAY < tm_end )
     178printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n",
     179__FUNCTION__, this, process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end );
     180#endif
    141181
    142 }  // end sys_get_sched()
     182    return 0;
     183
     184}  // end sys_display()
  • trunk/kernel/syscalls/sys_exec.c

    r421 r433  
    149149/////////////////////////////////////////////////////////////////////////////////////////
    150150// Implementation note:
    151 // This function build an exec_info_t structure containing all informations
     151// This function must be called by the main thread (thread 0 in owner cluster).
     152// IT build an exec_info_t structure containing all informations
    152153// required to initialize the new process descriptor and the associated thread.
    153 // It includes the process PID (unchanged), main() arguments, environment variables,
     154// It includes the process main() arguments, the environment variables,
    154155// and the pathname to the new process .elf file.
    155156// It calls the process_exec_get_strings() functions to copy the main() arguments and
     
    169170    error_t       error;
    170171
    171     // get parent process pid
     172    // get calling thread, process, & pid
    172173    thread_t    * this    = CURRENT_THREAD;
    173174    process_t   * process = this->process;
    174175    pid_t         pid     = process->pid;
    175176
    176 #if CONFIG_SYSCALL_DEBUG
     177    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
     178    "must be called in the owner cluster\n");
     179
     180    assert( (LTID_FROM_TRDID( this->trdid ) == 0) , __FUNCTION__ ,
     181    "must be called by the main thread\n");
     182
     183    assert( (args == NULL) , __FUNCTION__ ,
     184    "args not supported yet\n" );
     185
     186    assert( (envs == NULL) , __FUNCTION__ ,
      187    "envs not supported yet\n" );
     188
     189    // get owner cluster
     190
     191    // check pathname length
     192    if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH )
     193    {
     194
     195#if CONFIG_DEBUG_SYSCALLS_ERROR
     196printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ );
     197#endif
     198        this->errno = ENFILE;
     199        return -1;
     200    }
     201
     202    // copy pathname in exec_info structure (kernel space)
     203    hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
     204
     205#if CONFIG_DEBUG_SYS_EXEC
    177206uint64_t      tm_start;
    178207uint64_t      tm_end;
    179208tm_start = hal_get_cycles();
    180 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle = %d\n",
    181 __FUNCTION__, local_cxy, this->core->lid, pid, (uint32_t)tm_start );
    182 #endif
    183 
    184     // get owner cluster
    185     cxy_t  owner_cxy = CXY_FROM_PID( pid );
    186 
    187     // check pathname length
    188     if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH )
    189     {
    190         printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ );
    191         this->errno = ENFILE;
    192         return -1;
    193     }
    194 
    195     // copy pathname in exec_info structure (kernel space)
    196     hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH );
    197 
    198     // check args argument
    199     assert( (args == NULL) , __FUNCTION__ ,
    200     "args not supported yet\n" );
    201 
    202     // check envs argument
    203     assert( (envs == NULL) , __FUNCTION__ ,
    204     "args not supported yet\n" );
     209if( CONFIG_DEBUG_SYS_EXEC < tm_start )
     210printk("\n[DBG] %s : thread %x enter / process %x / path %s / cycle = %d\n",
     211__FUNCTION__, this, pid, exec_info.path, (uint32_t)tm_start );
     212#endif
    205213
    206214    // check and store args in exec_info structure if required
     
    209217        if( process_exec_get_strings( &exec_info , true , args ) )
    210218        {
    211             printk("\n[ERROR] in %s : cannot access args\n", __FUNCTION__ );
     219
     220#if CONFIG_DEBUG_SYSCALLS_ERROR
     221printk("\n[ERROR] in %s : cannot access args\n", __FUNCTION__ );
     222#endif
    212223            this->errno = error;
    213224            return -1;
     
    220231        if( process_exec_get_strings( &exec_info , false , envs ) )
    221232        {
    222             printk("\n[ERROR] in %s : cannot access envs\n", __FUNCTION__ );
     233
      234#if CONFIG_DEBUG_SYSCALLS_ERROR
     235printk("\n[ERROR] in %s : cannot access envs\n", __FUNCTION__ );
     236#endif
    223237            this->errno = error;
    224238            return -1;
     
    226240    }
    227241
    228     // register PID in exec_info
    229     exec_info.pid = pid;
    230 
    231     // call process_make_exec (local or remote)
    232     if( owner_cxy == local_cxy )
    233     {
    234         error = process_make_exec( &exec_info );
    235     }
    236     else
    237     {
    238         rpc_process_make_exec_client( owner_cxy,
    239                                       &exec_info,
    240                                       &error );
    241     }
     242    // call relevant kernel function
     243    error = process_make_exec( &exec_info );
    242244
    243245    if( error )
    244246    {
    245         printk("\n[ERROR] in %s : cannot create new process %x in cluster %x\n",
    246         __FUNCTION__, pid, owner_cxy );
     247
     248#if CONFIG_DEBUG_SYSCALLS_ERROR
     249printk("\n[ERROR] in %s : cannot create process %x in cluster %x\n",
      250__FUNCTION__, pid, CXY_FROM_PID( pid ) );
     251#endif
    247252        this->errno = error;
    248253        return -1;
    249254    }
    250255
    251 #if CONFIG_SYSCALL_DEBUG
     256#if CONFIG_DEBUG_SYS_EXEC
    252257tm_end = hal_get_cycles();
    253 printk("\n[DBG] %s : core[%x,%d] exit / process %x / path = %s / cost = %d / cycle %d\n",
    254 __FUNCTION__, local_cxy, this->core->lid, pid, exec_info.path,
    255 (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end );
    256 #endif
    257 
    258     return 0;
     258if( CONFIG_DEBUG_SYS_EXEC < tm_end )
     259printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n",
     260__FUNCTION__, this, pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     261#endif
     262
     263    // deschedule <=> old thread suicide because the BLOCKED_GLOBAL
     264    // and the FLAG_REQ_DELETE have been set by process_make_exec()
     265    sched_yield( "old process suicide in sys_exec()" );
     266
     267    assert( false , __FUNCTION__ , "This code should not be executed\n" );
     268
     269    return 0; 
    259270
    260271} // end sys_exec()
  • trunk/kernel/syscalls/sys_exit.c

    r416 r433  
    3636int sys_exit( uint32_t status )
    3737{
    38     uint32_t    save_sr;       // required to enable IRQs
     38    reg_t       save_sr;       // required to enable IRQs
    3939
    40     thread_t  * this = CURRENT_THREAD;
    41     pid_t       pid  = this->process->pid;
     40    thread_t  * this    = CURRENT_THREAD;
     41    process_t * process = this->process;
     42    pid_t       pid     = process->pid;
    4243
    43 #if CONFIG_SYSCALL_DEBUG
     44#if CONFIG_DEBUG_SYS_EXIT
    4445uint64_t    tm_start;
    4546uint64_t    tm_end;
    4647tm_start = hal_get_cycles();
    47 printk("\n[DBG] %s : core[%x,%d] enter / process %x / status %x / cycle %d\n",
    48 __FUNCTION__ , local_cxy , this->core->lid , pid , status , (uint32_t)tm_start );
     48if( CONFIG_DEBUG_SYS_EXIT < tm_start )
     49printk("\n[DBG] %s : thread %x enter / process %x / status %x / cycle %d\n",
     50__FUNCTION__ , this, pid , status , (uint32_t)tm_start );
    4951#endif
    5052
    51     // get owner process cluster
    52     cxy_t   owner_cxy  = CXY_FROM_PID( pid );
     53    // get cluster and pointers on process in owner cluster
     54    xptr_t      owner_xp  = cluster_get_owner_process_from_pid( pid );
     55    cxy_t       owner_cxy = GET_CXY( owner_xp );
     56    process_t * owner_ptr = GET_PTR( owner_xp );
     57
     58    assert( (owner_xp != XPTR_NULL) , __FUNCTION__ , "owner_xp cannot be NULL\n" );
    5359
    5460    // enable IRQs
    5561    hal_enable_irq( &save_sr );
    5662
    57     // execute process_make_exit() function in owner cluster
    58     if( local_cxy == owner_cxy )                                // owner is local
    59     {
    60         process_make_exit( pid , status );
    61     }
    62     else                                                        // owner is remote
    63     {
    64         rpc_process_make_exit_client( owner_cxy, pid , status );
    65     }
     63    // the process_make_kill() function must be executed
     64    // by an RPC thread in reference cluster
     65    rpc_process_make_kill_client( owner_cxy, owner_ptr, true , status );
    6666
    6767    // restore IRQs
     
    7070    hal_fence();
    7171
    72 #if CONFIG_SYSCALL_DEBUG
     72#if CONFIG_DEBUG_SYS_EXIT
    7373tm_end = hal_get_cycles();
    74 printk("\n[DBG] %s : core[%x,%d] exit / process %x / status %x / cost = %d\n",
    75 __FUNCTION__ , local_cxy , this->core->lid , pid , status , (uint32_t)(tm_end - tm_start) );
     74if( CONFIG_DEBUG_SYS_EXIT < tm_end )
     75printk("\n[DBG] %s : thread %x exit / process %x / status %x / cost = %d / cycle %d\n",
     76__FUNCTION__, this, pid, status, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    7677#endif
    7778
  • trunk/kernel/syscalls/sys_fork.c

    r416 r433  
    6363    parent_pid         = parent_process_ptr->pid;
    6464
    65 #if CONFIG_SYSCALL_DEBUG
     65#if CONFIG_DEBUG_SYS_FORK
    6666uint64_t          tm_start;
    6767uint64_t          tm_end;
    6868tm_start = hal_get_cycles();
    69 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle =  %d\n",
    70 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, parent_pid,
    71 (uint32_t)tm_start );
     69if( CONFIG_DEBUG_SYS_FORK < tm_start )
     70printk("\n[DBG] %s : thread %x enter / parent %x / cycle =  %d\n",
     71__FUNCTION__, parent_thread_ptr, parent_pid, (uint32_t)tm_start );
    7272#endif
    7373
     
    148148        thread_unblock( XPTR( target_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL );
    149149
    150 #if CONFIG_SYSCALL_DEBUG
     150#if CONFIG_DEBUG_SYS_FORK
    151151tm_end = hal_get_cycles();
    152 printk("\n[DBG] %s : core[%x,%d] parent_process %x exit / cost = %d\n",
    153 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid,  parent_pid,
    154 (uint32_t)(tm_end - tm_start) );
     152if( CONFIG_DEBUG_SYS_FORK < tm_end )
     153printk("\n[DBG] %s : parent_thread %x exit / cost = %d / cycle %d\n",
     154__FUNCTION__ , parent_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    155155#endif
    156156
     
    160160    {
    161161
    162 #if CONFIG_SYSCALL_DEBUG
     162#if CONFIG_DEBUG_SYS_FORK
    163163tm_end = hal_get_cycles();
    164 printk("\n[DBG] %s : core[%x,%d] child process %x exit / cost =  %d\n",
    165 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, child_pid,
    166 (uint32_t)(tm_end - tm_start) );
     164if( CONFIG_DEBUG_SYS_FORK < tm_end )
     165printk("\n[DBG] %s : child_thread %x exit / cost = %d / cycle %d\n",
     166__FUNCTION__ , child_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    167167#endif
    168168
  • trunk/kernel/syscalls/sys_kill.c

    r421 r433  
    3838{
    3939    uint32_t    save_sr;       // required to enable IRQs
    40     xptr_t      process_xp;    // extended pointer on target reference process
    41     cxy_t       process_cxy;   // target process cluster
    42     process_t * process_ptr;   // local pointer on target process
     40    xptr_t      owner_xp;      // extended pointer on target reference process
     41    cxy_t       owner_cxy;     // target process cluster
     42    process_t * owner_ptr;     // local pointer on target process
    4343    xptr_t      parent_xp;     // extended pointer on parent process
    4444    cxy_t       parent_cxy;    // parent process cluster
    4545    process_t * parent_ptr;    // local pointer on parent process
    4646    pid_t       ppid;          // parent process PID
     47    uint32_t    retval;        // return value for the switch
    4748
    4849    thread_t  * this    = CURRENT_THREAD;
    4950
    50 #if CONFIG_SYSCALL_DEBUG
     51#if CONFIG_DEBUG_SYS_KILL
    5152uint64_t    tm_start;
    5253uint64_t    tm_end;
    5354tm_start = hal_get_cycles();
    54 printk("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d / cycle %d\n",
    55 __FUNCTION__ , local_cxy , this->core->lid , pid, sig_id, (uint32_t)tm_start );
     55if( CONFIG_DEBUG_SYS_KILL < tm_start )
     56printk("\n[DBG] %s : thread %x enter / process %x / sig %d / cycle %d\n",
     57__FUNCTION__ , this, pid, sig_id, (uint32_t)tm_start );
    5658#endif
    5759
    58     // get cluster and pointers on reference process
    59     process_xp  = cluster_get_reference_process_from_pid( pid );
    60     process_cxy = GET_CXY( process_xp );
    61     process_ptr = (process_t *)GET_PTR( process_xp );
     60    // get cluster and pointers on owner process
     61    owner_xp  = cluster_get_owner_process_from_pid( pid );
     62    owner_cxy = GET_CXY( owner_xp );
     63    owner_ptr = GET_PTR( owner_xp );
    6264
    6365    // check process existence
    64     if( process_xp == XPTR_NULL )
     66    if( owner_xp == XPTR_NULL )
    6567    {
    66         syscall_dmsg("\n[ERROR] in %s : process %x not found\n",
    67         __FUNCTION__ , pid );
     68
     69syscall_dmsg("\n[ERROR] in %s : process %x not found\n", __FUNCTION__ , pid );
     70
    6871        this->errno = EINVAL;
    6972        return -1;
     
    7174   
    7275    // get parent process PID
    73     parent_xp  = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
     76    parent_xp  = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
    7477    parent_cxy = GET_CXY( parent_xp );
    7578    parent_ptr = GET_PTR( parent_xp );
     
    7982    if( ppid < 2 )
    8083    {
    81         syscall_dmsg("\n[ERROR] in %s : process %x cannot be killed\n",
    82         __FUNCTION__ , pid );
    83                 this->errno = EINVAL;
    84         return -1;
    85     }
    8684
    87     // does nothing if sig_id == 0
    88     if( sig_id == 0 )  return 0;
    89    
    90     // check sig_id
    91     if( (sig_id != SIGSTOP) && (sig_id != SIGCONT) && (sig_id != SIGKILL) )
    92     {
    93         syscall_dmsg("\n[ERROR] in %s : illegal signal type for process %x\n",
    94         __FUNCTION__ , sig_id , pid );
     85syscall_dmsg("\n[ERROR] in %s : process %x cannot be killed\n", __FUNCTION__ , pid );
     86
    9587                this->errno = EINVAL;
    9688        return -1;
     
    10092    hal_enable_irq( &save_sr );
    10193
    102     // execute process_make_kill() function in owner cluster
    103     if( local_cxy == process_cxy )                            // owner cluster is local
     94    // analyse signal type
     95    // supported values are : 0, SIGSTOP, SIGCONT, SIGKILL
     96    switch( sig_id )
    10497    {
    105         process_make_kill( pid , sig_id );
     98        case 0 :
     99        {
     100            // does nothing
     101            retval = 0;
     102            break;
     103        }
     104        case SIGSTOP:     
     105        {
     106            // remove TXT ownership from target process
     107            process_txt_reset_ownership( owner_xp );
     108
     109            // block all threads in all clusters
     110            process_sigaction( owner_ptr , BLOCK_ALL_THREADS );
     111
     112            // atomically update reference process termination state
     113            hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
     114                                  PROCESS_FLAG_BLOCK );
     115 
     116            retval = 0;
     117            break;
     118        }
     119        case SIGCONT:
     120        {
     121            // unblock all threads in all clusters
     122            process_sigaction( owner_ptr , UNBLOCK_ALL_THREADS );
     123
     124            // atomically update reference process termination state
     125            hal_remote_atomic_and( XPTR( owner_cxy , &owner_ptr->term_state ) ,
     126                                   ~PROCESS_FLAG_BLOCK );
     127            retval = 0;
     128            break;
     129        }
     130        break;
     131        case SIGKILL:
     132        {
     133            // the process_make_kill() function must be executed
     134            // by an RPC thread in process owner cluster
     135            // It deletes all target process threads in all clusters,
     136            // and updates the process termination state
     137            rpc_process_make_kill_client( owner_cxy , owner_ptr , false , 0 );
     138
     139            retval = 0;
     140            break;
     141        }
     142        default:
     143        {
     144
     145syscall_dmsg("\n[ERROR] in %s : illegal signal type %d for process %x\n",
     146__FUNCTION__ , sig_id , pid );
     147
     148            this->errno = EINVAL;
     149            retval = -1;
     150            break;
     151        }
    106152    }
    107     else                                                      // owner cluster is remote
    108     {
    109         rpc_process_make_kill_client( process_cxy , pid , sig_id );
    110     }
    111 
     153   
    112154    // restore IRQs
    113155    hal_restore_irq( save_sr );
     
    115157    hal_fence();
    116158
    117 #if CONFIG_SYSCALL_DEBUG
     159#if CONFIG_DEBUG_SYS_KILL
    118160tm_end = hal_get_cycles();
    119 printk("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d / cost = %d\n",
    120 __FUNCTION__ , local_cxy , this->core->lid , pid, sig_id, (uint32_t)(tm_end - tm_start) );
     161if( CONFIG_DEBUG_SYS_KILL < tm_end )
     162printk("\n[DBG] %s : thread %x enter / process %x / sig %d / cost = %d / cycle %d\n",
     163__FUNCTION__ , this, pid, sig_id, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    121164#endif
    122  
    123         return 0;
     165
     166        return retval;
    124167
    125168}  // end sys_kill()
  • trunk/kernel/syscalls/sys_read.c

    r421 r433  
    6565
    6666#if CONFIG_READ_DEBUG
     67enter_sys_read = (uint32_t)tm_start;
     68#endif
     69
     70        thread_t  *  this    = CURRENT_THREAD;
     71        process_t *  process = this->process;
     72 
     73#if CONFIG_DEBUG_SYS_READ
    6774uint64_t     tm_start;
    6875uint64_t     tm_end;
    6976tm_start = hal_get_cycles();
    70 #endif
    71 
    72 #if CONFIG_READ_DEBUG
    73 enter_sys_read = (uint32_t)tm_start;
    74 #endif
    75 
    76         thread_t  *  this    = CURRENT_THREAD;
    77         process_t *  process = this->process;
    78  
     77if( CONFIG_DEBUG_SYS_READ < tm_start )
      78printk("\n[DBG] %s : thread %x enter / process %x / vaddr = %x / count %d / cycle %d\n",
     79__FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start );
     80#endif
     81
    7982    // check file_id argument
    8083        if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
     
    188191    hal_fence();
    189192
    190 #if CONFIG_READ_DEBUG
     193#if CONFIG_DEBUG_SYS_READ
    191194tm_end = hal_get_cycles();
    192 printk("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
     195if( CONFIG_DEBUG_SYS_READ < tm_end )
     196printk("\n[DBG] %s : thread %x / process %x / cycle %d\n"
    193197"nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
    194198__FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
  • trunk/kernel/syscalls/sys_thread_exit.c

    r409 r433  
    8787            thread_block( this , THREAD_BLOCKED_JOIN );
    8888
    89             // release the lock protecting the flags
     89            // release the lock protecting the join
    9090                remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
    9191
  • trunk/kernel/syscalls/sys_wait.c

    r421 r433  
    4141    pid_t       child_pid;
    4242    int         child_state;
     43    thread_t  * child_thread;
    4344
    4445    thread_t  * this    = CURRENT_THREAD;
    4546    process_t * process = this->process;
     47    pid_t       pid     = process->pid;
    4648
    47 #if CONFIG_SYSCALL_DEBUG
     49#if CONFIG_DEBUG_SYS_WAIT
    4850uint64_t    tm_start;
    4951uint64_t    tm_end;
    5052tm_start = hal_get_cycles();
    51 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle %d\n",
    52 __FUNCTION__ , local_cxy , this->core->lid , process->pid, (uint32_t)tm_start );
     53if( CONFIG_DEBUG_SYS_WAIT < tm_start )
     54printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
     55__FUNCTION__, this, process->pid, (uint32_t)tm_start );
    5356#endif
    5457
     
    6467        }
    6568
    66     // get cluster and local pointer on reference process
    67     xptr_t      ref_xp  = process->ref_xp;
    68     cxy_t       ref_cxy = GET_CXY( ref_xp );
    69     process_t * ref_ptr = GET_PTR( ref_xp );
     69    // get process owner cluster
     70    cxy_t owner_cxy = CXY_FROM_PID( pid );
    7071
    71     // get extended pointer on children list root
    72     xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->children_root );
     72    // This function must be executed in owner cluster
     73    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
     74    "calling thread must execute in owner cluster" );
    7375
    74     // get extended pointer on lock protecting the children list
    75     xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->children_lock );
     76    // This function must be executed by the main thread
     77    assert( (process->th_tbl[0] == this) , __FUNCTION__ ,
     78    "this function must be executed by the main thread" );
     79   
     80    // get extended pointer on children list root and lock
     81    xptr_t children_root_xp = XPTR( owner_cxy , &process->children_root );
     82    xptr_t children_lock_xp = XPTR( owner_cxy , &process->children_lock );
    7683
    7784    // exit this blocking loop only when a child processes change state
    7885    while( 1 )
    7986    {
    80         // get lock
    81         remote_spinlock_lock( lock_xp );
     87        // get lock protecting children list
     88        remote_spinlock_lock( children_lock_xp );
    8289
    83         // scan the list of child process
    84         XLIST_FOREACH( root_xp , iter_xp )
     90        // scan the list of owner child process
     91        XLIST_FOREACH( children_root_xp , iter_xp )
    8592        {
    86             // get child process cluster and local pointer
     93            // get child process owner cluster and local pointer
    8794            child_xp  = XLIST_ELEMENT( iter_xp , process_t , children_list );
    8895            child_ptr = GET_PTR( child_xp );
    8996            child_cxy = GET_CXY( child_xp );
    9097
    91             // get the child PID
    92             child_pid = (int)hal_remote_lw( XPTR( child_cxy , &child_ptr->pid ) );
     98            // get term_state from child owner process
     99            child_state = (int)hal_remote_lw ( XPTR(child_cxy,&child_ptr->term_state));
    93100
    94             // get the child process state
    95             child_state = hal_remote_lw( XPTR( child_cxy , &child_ptr->state ) );
     101            // test if child process is terminated,
     102            // but termination not yet reported to parent process
     103            if( ((child_state & PROCESS_FLAG_EXIT)   ||
     104                 (child_state & PROCESS_FLAG_KILL)   ||
     105                 (child_state & PROCESS_FLAG_BLOCK)) &&
     106                 ((child_state & PROCESS_FLAG_WAIT) == 0) )
     107            {
     108                // get pointer on main thread and PID from child owner process
     109                child_pid    = (pid_t)     hal_remote_lw ( XPTR(child_cxy,&child_ptr->pid));
     110                child_thread = (thread_t *)hal_remote_lpt( XPTR(child_cxy,&child_ptr->th_tbl[0]));
    96111
    97             // check child process state
    98             if( child_state != PROCESS_STATE_RUNNING )
    99             {
    100                 // release lock
    101                 remote_spinlock_unlock( lock_xp );
     112                // set the PROCESS_FLAG_WAIT in owner child descriptor
     113                hal_remote_atomic_or( XPTR( child_cxy , &child_ptr->term_state ),
     114                                      PROCESS_FLAG_WAIT );
    102115
    103 #if CONFIG_SYSCALL_DEBUG
     116                // set the THREAD_FLAG_REQ_DELETE in child main thread
     117                hal_remote_atomic_or( XPTR( child_cxy , &child_thread->flags ) ,
     118                                            THREAD_FLAG_REQ_DELETE );
     119
     120#if CONFIG_DEBUG_SYS_WAIT
    104121tm_end = hal_get_cycles();
    105 printk("\n[DBG] %s : core[%x,%d] exit / process %x / cost = %d\n",
    106 __FUNCTION__ , local_cxy, this->core->lid, process->pid, (uint32_t)(tm_end - tm_start) );
     122if( CONFIG_DEBUG_SYS_WAIT < tm_end )
     123printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
     124__FUNCTION__, this, process->pid, (uint32_t)tm_end );
    107125#endif
    108126
    109                 // return relevant info to process
    110                 hal_copy_to_uspace( status , &child_state , sizeof(int) );
    111                 return child_pid;
     127                 // return relevant info to calling parent process
     128                 hal_copy_to_uspace( status , &child_state , sizeof(int) );
     129                 return child_pid;
    112130            }
    113131        }
    114132       
    115133        // release lock
    116         remote_spinlock_unlock( lock_xp );
     134        remote_spinlock_unlock( children_lock_xp );
    117135
    118         // block the calling thread until a child process change state
    119         thread_block( this , THREAD_BLOCKED_WAIT );
    120         sched_yield( "wait child termination" );
     136        // deschedule without blocking
     137        sched_yield( "parent wait children termination" );
    121138    }
    122139
  • trunk/kernel/syscalls/sys_write.c

    r421 r433  
    4646    reg_t        save_sr;         // required to enable IRQs during syscall
    4747
    48 #if CONFIG_WRITE_DEBUG
     48        thread_t   * this = CURRENT_THREAD;
     49        process_t  * process = this->process;
     50
     51#if CONFIG_DEBUG_SYS_WRITE
    4952uint32_t     tm_start;
    5053uint32_t     tm_end;
    5154tm_start = hal_get_cycles();
     55if( CONFIG_DEBUG_SYS_WRITE < tm_start )
     56printk("\n[DBG] %s : thread %x / process %x / vaddr %x / count %d / cycle %d\n",
     57__FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start );
    5258#endif
    53 
    54         thread_t   * this = CURRENT_THREAD;
    55         process_t  * process = this->process;
    5659 
    5760    // check file_id argument
    5861        if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
    5962        {
    60         printk("\n[ERROR] in %s : illegal file descriptor index\n", __FUNCTION__ );
     63
     64#if CONFIG_DEBUG_SYSCALLS_ERROR
     65printk("\n[ERROR] in %s : illegal file descriptor index\n", __FUNCTION__ );
     66#endif
    6167        this->errno = EBADFD;
    6268                return -1;
     
    6874    if ( error )
    6975    {
    70         printk("\n[ERROR] in %s : user buffer unmapped = %x\n",
    71         __FUNCTION__ , (intptr_t)vaddr );
     76
     77#if CONFIG_DEBUG_SYSCALLS_ERROR
     78printk("\n[ERROR] in %s : user buffer unmapped = %x\n", __FUNCTION__ , (intptr_t)vaddr );
     79#endif
    7280                this->errno = EINVAL;
    7381                return -1;
     
    8290    if( file_xp == XPTR_NULL )
    8391    {
    84         printk("\n[ERROR] in %s : undefined file descriptor index = %d in process %x\n",
    85         __FUNCTION__ , file_id , process->pid );
     92
     93#if CONFIG_DEBUG_SYSCALLS_ERROR
     94printk("\n[ERROR] in %s : undefined file descriptor index = %d in process %x\n",
     95__FUNCTION__ , file_id , process->pid );
     96#endif
    8697                this->errno = EBADFD;
    8798                return -1;
     
    103114        if( (attr & FD_ATTR_WRITE_ENABLE) == 0 )
    104115            {
    105             printk("\n[ERROR] in %s : file %d not writable in process %x\n",
    106             __FUNCTION__ , file_id , process->pid );
     116
     117#if CONFIG_DEBUG_SYSCALLS_ERROR
     118printk("\n[ERROR] in %s : file %d not writable in process %x\n",
     119__FUNCTION__ , file_id , process->pid );
     120#endif
    107121                    this->errno = EBADFD;
    108122                    return -1;
     
    131145    if( nbytes != count )
    132146    {
    133         printk("\n[ERROR] in %s cannot write data to file %d in process %x\n",
    134         __FUNCTION__ , file_id , process->pid );
     147
     148#if CONFIG_DEBUG_SYSCALLS_ERROR
     149printk("\n[ERROR] in %s cannot write data to file %d in process %x\n",
     150__FUNCTION__ , file_id , process->pid );
     151#endif
    135152        this->errno = error;
    136153        return -1;
     
    142159    hal_fence();
    143160
    144 #if CONFIG_WRITE_DEBUG
     161#if CONFIG_DEBUG_SYS_WRITE
    145162tm_end = hal_get_cycles();
    146 printk("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
     163if( CONFIG_DEBUG_SYS_WRITE < tm_end )
     164printk("\n[DBG] %s : thread %x in process %x / cycle %d\n"
    147165"nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
    148 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
    149 (uint32_t)tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id ,
    150 (uint32_t)(tm_end - tm_start) );
     166__FUNCTION__, this, process->pid, (uint32_t)tm_start,
     167nbytes, *((char *)(intptr_t)paddr) , file_id , (uint32_t)(tm_end - tm_start) );
    151168#endif
    152169 
    153 #if (CONFIG_WRITE_DEBUG & 0x1)
    154 printk("\n@@@@@@@@@@@@ timing to write character %c\n"
    155 " - enter_sys_write    = %d\n"
    156 " - exit_sys_write     = %d\n",
    157 *((char *)(intptr_t)paddr) , (uint32_t)tm_start , (uint32_t)tm_end );
    158 #endif
    159 
    160170        return nbytes;
    161171
  • trunk/kernel/syscalls/syscalls.h

    r421 r433  
    171171/******************************************************************************************
     172172 * [10] This function implements the exit system call terminating a POSIX process.
      173 * In the present implementation, this function actually implements the _exit():
      174 * - it does not flush open output streams.
      175 * - it does not close open streams.
    173176 ******************************************************************************************
    174177 * @ status   : terminaison status (not used in present implementation).
     
    421424
    422425/******************************************************************************************
    423  * [34] This function implements the "kill" system call.
     426 * [34] This function implements the "kill" system call on the kernel side.
     424427 * It registers the signal defined by the <sig_id> argument in all thread descriptors
    425428 * of a target process identified by the <pid> argument. This is done in all clusters
     
    432435 ******************************************************************************************
    433436 * @ pid      : target process identifier.
    434  * @ sig_id   : index defining the signal type (from 1 to 31).
     437 * @ sig_id   : index defining the signal type.
    435438 * @ return 0 if success / returns -1 if failure.
    436439 *****************************************************************************************/
     
    439442
    440443/******************************************************************************************
    441  * [35] This function implements the "getpid" system call.
     444 * [35] This function implements the "getpid" system call on the kernel side.
    442445 ******************************************************************************************
    443446 * @ returns the process PID for the calling thread.
     
    446449
    447450/******************************************************************************************
    448  * [36] This function implement the "fork" system call.
    449  * The calling process descriptor (parent process), and the associated thread descriptor are
    450  * replicated in the same cluster as the calling thread, but the new process (child process)
    451  * is registered in another target cluster, that is the new process owner.
    452  * The child process and the associated main thread will be migrated to the target cluster
    453  * later, when the child process makes an "exec" or any other system call... TODO [AG]
      451 * [36] This function implements the "fork" system call on the kernel side.
      452 * The calling process descriptor (parent process), and the associated thread descriptor
      453 * are replicated in a - likely - remote cluster, that becomes the child process owner.
      454 * The child process gets a new PID, and is linked to the parent PID. The child process
      455 * inherits from its parent the memory image, and all open files (including the TXT).
     456 * The child process becomes the TXT terminal owner.
    454457 * The target cluster depends on the "fork_user" flag and "fork_cxy" variable that can be
    455458 * stored in the calling thread descriptor by the specific fork_place() system call.
    456  * If not, the sys_fork() function makes a query to the DQDT to select the target cluster.
     459 * If not, the kernel function makes a query to the DQDT to select the target cluster.
    457460 ******************************************************************************************
    458461 * @ if success, returns child process PID to parent, and return O to child.
     
    462465
    463466/******************************************************************************************
    464  * [37] This function implement the "exec" system call, that creates a new process
    465  * descriptor.
    466  * It is executed in the client cluster, but the new process descriptor and the main
    467  * thread are created in a server cluster, that is generally another cluster.
    468  * - if the server_cluster is the client cluster, it calls directly the process_make_exec()
    469  *   function to create a new process, and launch a new thread in local cluster.
    470  * - if the target_cluster is remote, it calls the rpc_process_exec_client() to execute
    471  *   process_signedmake_exec() on the remote cluster.
    472  * In both case this function build an exec_info_t structure containing all informations
    473  * required to build the new process descriptor and the associated thread.
    474  * Finally, the calling process and thread are deleted.
      467 * [37] This function implements the "exec" system call on the kernel side.
      468 * It creates, in the same cluster as the calling thread, a new process descriptor,
      469 * and a new associated main thread descriptor, executing a new memory image defined
      470 * by the <filename> argument. This new process inherits from the old process the PID
      471 * and the PPID, as well as all open files (including the TXT).
      472 * The old process descriptor, and all its threads are blocked, and marked for deletion.
      473 * Therefore the exec syscall does not return to the calling thread in case of success.
      474 * This function builds an exec_info_t structure containing the new process arguments,
      475 * as defined by the <argv> argument, and the new process environment variables,
      476 * as defined by the <envp> argument.
      477 * TODO : the <argv> and <envp> arguments are not supported yet (both must be NULL).
    475478 ******************************************************************************************
    476479 * @ filename : string pointer on .elf filename (pointer in user space)
    477480 * @ argv     : array of strings on process arguments (pointers in user space)
    478481 * @ envp     : array of strings on environment v