Changeset 440 for trunk/kernel


Timestamp: May 3, 2018, 5:51:22 PM (6 years ago)
Author: alain
Message:

1/ Fix a bug in the multithreaded "sort" application:
the pthread_create() arguments must be declared as global variables.
2/ The exit syscall can now be called by any thread of a process.
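
The first fix concerns argument lifetime: if the structure whose address is passed to pthread_create() lives on the stack of the function creating the threads, it may be reclaimed or overwritten before the new thread dereferences it. Below is a minimal user-level sketch of the corrected pattern; the names are illustrative and do not come from the actual "sort" application:

    /* Illustrative sketch only: the arguments passed by pointer to
     * pthread_create() are declared as global variables, so they remain
     * valid for the whole lifetime of the worker threads. */
    #include <pthread.h>
    #include <stdio.h>

    #define NB_THREADS  4

    typedef struct { unsigned int first; unsigned int last; } work_t;

    static work_t    args[NB_THREADS];    /* global: not reclaimed like a stack frame */
    static pthread_t trdid[NB_THREADS];

    static void * worker( void * arg )
    {
        work_t * w = (work_t *)arg;
        printf("thread handles indexes [%u..%u]\n", w->first, w->last);
        return NULL;
    }

    int main( void )
    {
        unsigned int i;
        for( i = 0 ; i < NB_THREADS ; i++ )
        {
            args[i].first = i * 256;
            args[i].last  = i * 256 + 255;
            pthread_create( &trdid[i] , NULL , worker , &args[i] );
        }
        for( i = 0 ; i < NB_THREADS ; i++ ) pthread_join( trdid[i] , NULL );
        return 0;
    }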

Location:
trunk/kernel
Files:
47 edited

  • trunk/kernel/Makefile

    r439 r440  
    88endif
    99
    10 #We choose drivers and hal file we need to link with kernel.elf
     10#We choose drivers and hal file to be linked with kernel.elf
    1111ifeq ($(ARCH_NAME), tsar_mips32)
     12
    1213  DRIVERS_OBJS = $(HAL_ARCH)/build/drivers/soclib_tty.o  \
    13                 $(HAL_ARCH)/build/drivers/soclib_bdv.o  \
    14                 $(HAL_ARCH)/build/drivers/soclib_hba.o  \
    15                 $(HAL_ARCH)/build/drivers/soclib_mmc.o  \
    16                 $(HAL_ARCH)/build/drivers/soclib_pic.o  \
    17                 $(HAL_ARCH)/build/drivers/soclib_nic.o  \
    18                 $(HAL_ARCH)/build/drivers/soclib_dma.o  \
    19                 $(HAL_ARCH)/build/drivers/soclib_iob.o
    20 
    21   CORE_OBJS    = $(HAL_ARCH)/build/core/hal_special.o      \
    22                 $(HAL_ARCH)/build/core/hal_context.o      \
    23                 $(HAL_ARCH)/build/core/hal_atomic.o       \
    24                 $(HAL_ARCH)/build/core/hal_remote.o       \
    25                 $(HAL_ARCH)/build/core/hal_uspace.o       \
    26                 $(HAL_ARCH)/build/core/hal_irqmask.o      \
    27                 $(HAL_ARCH)/build/core/hal_gpt.o          \
    28                 $(HAL_ARCH)/build/core/hal_ppm.o          \
    29                 $(HAL_ARCH)/build/core/hal_vmm.o          \
    30                 $(HAL_ARCH)/build/core/hal_exception.o    \
    31                 $(HAL_ARCH)/build/core/hal_interrupt.o    \
    32                 $(HAL_ARCH)/build/core/hal_syscall.o      \
    33                 $(HAL_ARCH)/build/core/hal_drivers.o      \
    34                 $(HAL_ARCH)/build/core/hal_kentry.o       \
    35                 $(HAL_ARCH)/build/core/hal_switch.o
     14                 $(HAL_ARCH)/build/drivers/soclib_bdv.o  \
     15                 $(HAL_ARCH)/build/drivers/soclib_hba.o  \
     16                 $(HAL_ARCH)/build/drivers/soclib_mmc.o  \
     17                 $(HAL_ARCH)/build/drivers/soclib_pic.o  \
     18                 $(HAL_ARCH)/build/drivers/soclib_nic.o  \
     19                 $(HAL_ARCH)/build/drivers/soclib_dma.o  \
     20                 $(HAL_ARCH)/build/drivers/soclib_iob.o
     21
     22  HAL_OBJS     = $(HAL_ARCH)/build/core/hal_special.o    \
     23                 $(HAL_ARCH)/build/core/hal_context.o    \
     24                 $(HAL_ARCH)/build/core/hal_atomic.o     \
     25                 $(HAL_ARCH)/build/core/hal_remote.o     \
     26                 $(HAL_ARCH)/build/core/hal_uspace.o     \
     27                 $(HAL_ARCH)/build/core/hal_irqmask.o    \
     28                 $(HAL_ARCH)/build/core/hal_gpt.o        \
     29                 $(HAL_ARCH)/build/core/hal_ppm.o        \
     30                 $(HAL_ARCH)/build/core/hal_vmm.o        \
     31                 $(HAL_ARCH)/build/core/hal_kentry.o     \
     32                 $(HAL_ARCH)/build/core/hal_switch.o     \
     33                 $(HAL_ARCH)/build/core/hal_syscall.o    \
     34                 $(HAL_ARCH)/build/core/hal_exception.o  \
     35                 $(HAL_ARCH)/build/core/hal_interrupt.o  \
     36                 $(HAL_ARCH)/build/core/hal_drivers.o
    3637endif
    3738
     
    3940
    4041  DRIVERS_OBJS = $(HAL_ARCH)/build/drivers/ioc_ata.o     \
    41                 $(HAL_ARCH)/build/drivers/pic_apic.o    \
    42                 $(HAL_ARCH)/build/drivers/txt_rs232.o
    43 
    44   CORE_OBJS    = \
    45                 $(HAL_ARCH)/build/core/hal_boot.o         \
    46                 $(HAL_ARCH)/build/core/hal_smpboot.o      \
    47                 $(HAL_ARCH)/build/core/hal_init.o         \
    48                 $(HAL_ARCH)/build/core/hal_cpu.o          \
    49                 $(HAL_ARCH)/build/core/hal_kentry.o       \
    50                 $(HAL_ARCH)/build/core/hal_acpi.o         \
    51                 $(HAL_ARCH)/build/core/hal_apic.o         \
    52                 $(HAL_ARCH)/build/core/x86_printf.o       \
    53                 $(HAL_ARCH)/build/core/hal_drivers.o      \
    54                 $(HAL_ARCH)/build/core/hal_special.o      \
    55                 $(HAL_ARCH)/build/core/hal_context.o      \
    56                 $(HAL_ARCH)/build/core/hal_atomic.o       \
    57                 $(HAL_ARCH)/build/core/hal_remote.o       \
    58                 $(HAL_ARCH)/build/core/hal_uspace.o       \
    59                 $(HAL_ARCH)/build/core/hal_irqmask.o      \
    60                 $(HAL_ARCH)/build/core/hal_gpt.o          \
    61                 $(HAL_ARCH)/build/core/hal_ppm.o          \
    62                 $(HAL_ARCH)/build/core/hal_exception.o    \
    63                 $(HAL_ARCH)/build/core/hal_interrupt.o    \
    64                 $(HAL_ARCH)/build/core/hal_syscall.o
    65 
     42                 $(HAL_ARCH)/build/drivers/pic_apic.o    \
     43                 $(HAL_ARCH)/build/drivers/txt_rs232.o
     44
     45  HAL_OBJS     = $(HAL_ARCH)/build/core/hal_boot.o       \
     46                 $(HAL_ARCH)/build/core/hal_smpboot.o    \
     47                 $(HAL_ARCH)/build/core/hal_init.o       \
     48                 $(HAL_ARCH)/build/core/hal_cpu.o        \
     49                 $(HAL_ARCH)/build/core/hal_kentry.o     \
     50                 $(HAL_ARCH)/build/core/hal_acpi.o       \
     51                 $(HAL_ARCH)/build/core/hal_apic.o       \
     52                 $(HAL_ARCH)/build/core/x86_printf.o     \
     53                 $(HAL_ARCH)/build/core/hal_drivers.o    \
     54                 $(HAL_ARCH)/build/core/hal_special.o    \
     55                 $(HAL_ARCH)/build/core/hal_context.o    \
     56                 $(HAL_ARCH)/build/core/hal_atomic.o     \
     57                 $(HAL_ARCH)/build/core/hal_remote.o     \
     58                 $(HAL_ARCH)/build/core/hal_uspace.o     \
     59                 $(HAL_ARCH)/build/core/hal_irqmask.o    \
     60                 $(HAL_ARCH)/build/core/hal_gpt.o        \
     61                 $(HAL_ARCH)/build/core/hal_ppm.o        \
     62                 $(HAL_ARCH)/build/core/hal_exception.o  \
     63                 $(HAL_ARCH)/build/core/hal_interrupt.o  \
     64                 $(HAL_ARCH)/build/core/hal_syscall.o
    6665endif
    6766
     
    187186# List of directories to be searched for included files
    188187# when compiling for kernel.elf generation
    189 KERNEL_INCLUDE = -I.                  \
    190                  -Ikern             \
    191                  -Idevices          \
    192                  -Isyscalls         \
     188KERNEL_INCLUDE = -I.                   \
     189                 -Ikern                \
     190                 -Idevices             \
     191                 -Isyscalls            \
    193192                 -I$(HAL_ARCH)/drivers \
    194                  -Isyscalls         \
    195                  -Ilibk             \
    196                  -Imm               \
    197                  -Ifs               \
    198                  -I../tools/arch_info         \
    199                  -I$(HAL)/generic             \
     193                 -Isyscalls            \
     194                 -Ilibk                \
     195                 -Imm                  \
     196                 -Ifs                  \
     197                 -I../tools/arch_info  \
     198                 -I$(HAL)/generic      \
    200199                 -I$(HAL_ARCH)/core    \
    201200                 -I..
     
    225224#######################################
    226225# Rules to generate kernel/kern objects
    227 build/kern/%.o:             kern/%.c             \
    228                                 kern/%.h             \
    229                                 kernel_config.h             \
    230                                 $(HAL_ARCH)/core/hal_types.h
     226build/kern/%.o:             kern/%.c                      \
     227                        kern/%.h                      \
     228                        kernel_config.h               \
     229                        $(HAL_ARCH)/core/hal_types.h
    231230        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    232231
     
    234233######################################
    235234# Rules to generate kernel/dev objects
    236 build/devices/%.o:       devices/%.c          \
    237                                 devices/%.h          \
    238                                 kernel_config.h             \
    239                                 $(HAL_ARCH)/core/hal_types.h
     235build/devices/%.o:      devices/%.c                   \
     236                        devices/%.h                   \
     237                        kernel_config.h               \
     238                        $(HAL_ARCH)/core/hal_types.h
    240239        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    241240
    242241#####################################
    243242# Rules to generate kernel/mm objects
    244 build/mm/%.o:            mm/%.c               \
    245                                 mm/%.h               \
    246                                 kernel_config.h             \
    247                                 $(HAL_ARCH)/core/hal_types.h
     243build/mm/%.o:           mm/%.c                        \
     244                        mm/%.h                        \
     245                        kernel_config.h               \
     246                        $(HAL_ARCH)/core/hal_types.h
    248247        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    249248
    250249#######################################
    251250# Rules to generate kernel/libk objects
    252 build/libk/%.o:          libk/%.c             \
    253                                 libk/%.h             \
    254                                 kernel_config.h             \
    255                                 $(HAL_ARCH)/core/hal_types.h
     251build/libk/%.o:         libk/%.c                      \
     252                        libk/%.h                      \
     253                        kernel_config.h               \
     254                        $(HAL_ARCH)/core/hal_types.h
    256255        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    257256
    258257###########################################
    259258# Rules to generate kernel/syscalls objects
    260 build/syscalls/%.o:      syscalls/%.c         \
    261                                 syscalls/syscalls.h  \
    262                                 kernel_config.h             \
    263                                 $(HAL_ARCH)/core/hal_types.h
     259build/syscalls/%.o:     syscalls/%.c                  \
     260                        syscalls/syscalls.h           \
     261                        kernel_config.h               \
     262                        $(HAL_ARCH)/core/hal_types.h
    264263        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    265264
    266265#######################################
    267266# Rules to generate kernel/fs objects
    268 build/fs/%.o:            fs/%.c               \
    269                                 fs/%.h               \
    270                                 kernel_config.h             \
    271                                 $(HAL_ARCH)/core/hal_types.h
    272         $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
    273 
    274 #############################################################
     267build/fs/%.o:           fs/%.c                        \
     268                        fs/%.h                        \
     269                        kernel_config.h               \
     270                        $(HAL_ARCH)/core/hal_types.h
     271        $(CC) $(KERNEL_INCLUDE) $(CFLAGS) -c -o $@ $<
     272
     273##############################
    275274# Rule to generate kernel.elf
    276 # TODO the syscalls grouped in SYS_OBJS_2 must be introduced
    277 build/kernel.elf:           $(KERN_OBJS)                \
    278                                 $(CORE_OBJS)                 \
    279                                 $(DEV_OBJS)                 \
    280                                 $(MM_OBJS)                  \
    281                                 $(LIBK_OBJS)                \
    282                                 $(DRIVERS_OBJS)             \
    283                                 $(VFS_OBJS)                 \
    284                                 $(SYS_OBJS_0)               \
    285                                 $(SYS_OBJS_1)               \
    286                                 $(SYS_OBJS_2)               \
    287                                 $(SYS_OBJS_3)               \
    288                                 $(SYS_OBJS_4)               \
    289                                                                 $(HAL_ARCH)/kernel.ld
    290         $(LD) -o $@ -T $(HAL_ARCH)/kernel.ld $(LIBGCC)      \
    291           $(KERN_OBJS) $(CORE_OBJS) $(DEV_OBJS) $(MM_OBJS)   \
    292           $(LIBK_OBJS) $(DRIVERS_OBJS) $(VFS_OBJS)          \
    293           $(SYS_OBJS_0) $(SYS_OBJS_1) $(SYS_OBJS_2)         \
     275build/kernel.elf:           $(KERN_OBJS)                 \
     276                        $(HAL_OBJS_0)                \
     277                        $(HAL_OBJS_1)                \
     278                        $(DEV_OBJS)                  \
     279                        $(MM_OBJS)                   \
     280                        $(LIBK_OBJS)                 \
     281                        $(DRIVERS_OBJS)              \
     282                        $(VFS_OBJS)                  \
     283                        $(SYS_OBJS_0)                \
     284                        $(SYS_OBJS_1)                \
     285                        $(SYS_OBJS_2)                \
     286                        $(SYS_OBJS_3)                \
     287                        $(SYS_OBJS_4)                \
     288                        $(HAL_ARCH)/kernel.ld
     289        $(LD) -o $@ -T $(HAL_ARCH)/kernel.ld $(LIBGCC)         \
     290          $(KERN_OBJS) $(HAL_OBJS) $(DEV_OBJS) $(MM_OBJS)  \
     291          $(LIBK_OBJS) $(DRIVERS_OBJS) $(VFS_OBJS)         \
     292          $(SYS_OBJS_0) $(SYS_OBJS_1) $(SYS_OBJS_2)        \
    294293          $(SYS_OBJS_3) $(SYS_OBJS_4) -lgcc
    295294        $(DU) -D $@ > $@.txt
  • trunk/kernel/devices/dev_fbf.c

    r438 r440  
    119119//////////////////////////////////////////////////////////////////////////////////
    120120// This static function is called by dev_fbf_read() & dev_fbf_write() functions.
    121 // It builds and registers the command in the calling thread descriptor, after
    122 // translation of buffer virtual address to physical address.
    123 // Then, it registers the calling thead in the relevant DMA chdev waiting queue.
     121// It builds and registers the command in the calling thread descriptor.
     122// Then, it registers the calling thread in the relevant DMA chdev waiting queue.
    124123// Finally it blocks on the THREAD_BLOCKED_DEV condition and deschedule.
    125124////////////////////////////////////i/////////////////////////////////////////////
     
    129128                               uint32_t  offset )
    130129{
    131     error_t     error;
    132     paddr_t     buf_paddr;
    133 
    134     thread_t * this = CURRENT_THREAD;              // pointer on client thread
    135 
    136     // Get buffer physical address
    137     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buffer , &buf_paddr );
    138  
    139     // check buffer is mapped
    140     assert( (error == 0) , __FUNCTION__ ,
    141     "cannot translate vaddr = %p in process %x\n", buffer, this->process->pid );
    142130
    143131    // get extended pointer on FBF chdev descriptor
     
    160148
    161149    // compute extended pointers on frame buffer and memory buffer
    162     xptr_t  mem_buf_xp = XPTR( local_cxy , (void *)(intptr_t)buf_paddr );
     150    xptr_t  mem_buf_xp = XPTR( local_cxy , buffer );
    163151    xptr_t  fbf_buf_xp = base + offset;
    164152
  • trunk/kernel/devices/dev_ioc.c

    r438 r440  
    9191// This static function is called by dev_ioc_read() & dev_ioc_write() functions.
    9292// It builds and registers the command in the calling thread descriptor.
    93 // Then, it registers the calling thead in chdev waiting queue.
     93// Then, it registers the calling thread in the IOC chdev waiting queue.
    9494// Finally it blocks on the THREAD_BLOCKED_IO condition and deschedule.
    9595////////////////////////////////////i/////////////////////////////////////////////
  • trunk/kernel/devices/dev_mmc.c

    r438 r440  
    5757
    5858/////////////////////////////////////////////////////////////////////////////
    59 // This static function is called by all MMC device functions.
     59// This static function is called by all MMC device access functions.
    6060// It makes some checking, takes the lock granting exclusive
    6161// access to MMC peripheral, call the driver to execute the command
     
    7171    // get MMC device cluster identifier & local pointer
    7272    cxy_t     dev_cxy = GET_CXY( dev_xp );
    73     chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp );
     73    chdev_t * dev_ptr = GET_PTR( dev_xp );
    7474
    7575    // get driver command function pointer from MMC device descriptor
     
    9797
    9898    // get calling thread local pointer
    99     thread_t * this = CURRENT_THREAD;
     99    thread_t  * this    = CURRENT_THREAD;
    100100
    101101#if DEBUG_DEV_MMC
     
    113113             "buffer not aligned on cache line" );
    114114
    115     // get buffer physical address
    116     paddr_t  buf_paddr;
    117     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buf_ptr , &buf_paddr );
    118 
    119     assert( (error == 0) , __FUNCTION__ , "cannot get buffer paddr" );
    120 
    121115    // store command arguments in thread descriptor
    122116    this->mmc_cmd.dev_xp    = chdev_dir.mmc[buf_cxy];
    123117    this->mmc_cmd.type      = MMC_CC_INVAL;
    124     this->mmc_cmd.buf_paddr = buf_paddr;
     118    this->mmc_cmd.buf_ptr   = buf_ptr;
    125119    this->mmc_cmd.buf_size  = buf_size;
    126120
     
    144138    error_t error;
    145139
    146     // get calling thread local pointer
    147     thread_t * this = CURRENT_THREAD;
     140    thread_t  * this    = CURRENT_THREAD;
    148141
    149142#if DEBUG_DEV_MMC
     
    161154             "buffer not aligned on cache line" );
    162155
    163     // get  buffer physical address
    164     paddr_t  buf_paddr;
    165     error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buf_ptr , &buf_paddr );
    166 
    167     assert( (error == 0) , __FUNCTION__ , "cannot get buffer paddr" );
    168 
    169156    // store command arguments in thread descriptor
    170157    this->mmc_cmd.dev_xp    = chdev_dir.mmc[buf_cxy];
    171158    this->mmc_cmd.type      = MMC_CC_SYNC;
    172     this->mmc_cmd.buf_paddr = buf_paddr;
     159    this->mmc_cmd.buf_ptr   = buf_ptr;
    173160    this->mmc_cmd.buf_size  = buf_size;
    174161
  • trunk/kernel/devices/dev_mmc.h

    r437 r440  
    8383    xptr_t      dev_xp;     /*! extended pointer on target MMC device descriptor        */
    8484    uint32_t    type;       /*! CC_INVAL / CC_SYNC / GET_ERROR / SET_ERROR / GET_INSTRU */
    85     paddr_t     buf_paddr;  /*! physical address of memory buffer (used by INVAL/SYNC)  */
    86     uint32_t    buf_size;   /*! buffer size in bytes              (used by INVAL/SYNC)  */
     85    void      * buf_ptr;    /*! local pointer on memory buffer    (used by INVAL/SYNC)  */
     86    uint32_t    buf_size;   /*! memory buffer size (bytes)        (used by INVAL/SYNC)  */
    8787    uint32_t    reg_index;  /*! register index in MMC peripheral  (used by SET/GET)     */
    8888    uint32_t  * reg_ptr;    /*! local pointer on src/dst buffer   (used by SET/GET)     */
  • trunk/kernel/fs/fatfs.c

    r438 r440  
    6161
    6262//////////////////////////////////////////////////////////////////////////////////////////
    63 // This function returns the LBA of the first sector of a FAT cluster.
     63// This static function returns the LBA of the first sector of a FAT cluster.
    6464// This function can be called by any thread running in any cluster.
    6565//////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/kern/chdev.c

    r438 r440  
    124124{
    125125    thread_t * server_ptr;    // local pointer on server thread associated to chdev
     126    xptr_t     server_xp;     // extended pointer on server thread
    126127    core_t   * core_ptr;      // local pointer on core running the server thread
    127128    uint32_t   lid;           // core running the server thread local index
     
    140141    thread_t * this = CURRENT_THREAD;
    141142
    142     // get device descriptor cluster and local pointer
     143    // get chdev cluster and local pointer
    143144    cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    144     chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
     145    chdev_t * chdev_ptr = GET_PTR( chdev_xp );
     146
     147    // get local and extended pointers on server thread
     148    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
     149    server_xp  = XPTR( chdev_cxy , server_ptr );
     150
     151    // get local pointer on core running the server thread
     152    core_ptr   = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
     153
     154    // get server core local index
     155    lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
    145156
    146157#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
     
    162173#endif
    163174
    164     // build extended pointers on client thread xlist and device root
    165     xptr_t  list_xp = XPTR( local_cxy , &this->wait_list );
    166     xptr_t  root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );
    167 
    168     // get local pointer on server thread
    169     server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
    170 
    171     // build extended pointer on chdev lock protecting queue
     175    // build extended pointer on client thread xlist
     176    xptr_t  list_xp    = XPTR( local_cxy , &this->wait_list );
     177
     178    // build extended pointer on chdev waiting queue root
     179    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );
     180
     181    // build extended pointer on server thread blocked state
     182    xptr_t  blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );
     183
     184    // build extended pointer on lock protecting chdev waiting queue
    172185    lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
    173186
    174     // get local pointer on core running the server thread
    175     core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
    176 
    177     // get core local index
    178     lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
    179 
    180     // compute server core != thread core
    181     different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    182 
    183     // enter critical section to make atomic :
    184     // (1) client blocking
    185     // (2) client registration in server queue
    186     // (3) IPI to force server scheduling
    187     // (4) descheduling
     187    // critical section for the following sequence:
     188    // (1) take the lock protecting waiting queue
     189    // (2) block the client thread
     190    // (3) unblock the server thread if required
     191    // (4) register client thread in server queue
     192    // (5) send IPI to force server scheduling
     193    // (6) release the lock protecting waiting queue
     194    // (7) deschedule
    188195    // ... in this order
     196
     197    // enter critical section
    189198    hal_disable_irq( &save_sr );
     199
     200    // take the lock
     201    remote_spinlock_lock( lock_xp );
    190202
    191203    // block current thread
    192204    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
    193205
     206    if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
     207    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
     208
    194209    // register client thread in waiting queue
    195     remote_spinlock_lock( lock_xp );
    196210    xlist_add_last( root_xp , list_xp );
    197     remote_spinlock_unlock( lock_xp );
    198 
    199     // send IPI to core running the server thread if required
     211
     212    // send IPI to core running the server thread when server != client
     213    different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    200214    if( different ) dev_pic_send_ipi( chdev_cxy , lid );
    201215   
     216    // release lock
     217    remote_spinlock_unlock( lock_xp );
     218
    202219    // deschedule
    203220    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
     
    260277            remote_spinlock_unlock( lock_xp );
    261278
     279            // block
     280            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );
     281
    262282            // deschedule
     283            assert( thread_can_yield( server ) , __FUNCTION__ , "illegal sched_yield\n" );
    263284            sched_yield("I/O queue empty");
    264285        }
    265286        else                            // waiting queue not empty
    266287        {
     288            // get extended pointer on first client thread
     289            client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
     290
     291            // get client thread cluster and local pointer
     292            client_cxy = GET_CXY( client_xp );
     293            client_ptr = GET_PTR( client_xp );
     294
     295            // remove this first client thread from waiting queue
     296            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
     297
    267298            // release lock
    268299            remote_spinlock_unlock( lock_xp );
    269 
    270             // get extended pointer on first client thread
    271             client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    272 
    273             // get client thread cluster, local pointer, and identifier
    274             client_cxy = GET_CXY( client_xp );
    275             client_ptr = (thread_t *)GET_PTR( client_xp );
    276300
    277301#if DEBUG_CHDEV_SERVER_RX
     
    300324            chdev->cmd( client_xp );
    301325       
    302             // remove the client thread from waiting queue
    303             remote_spinlock_lock( lock_xp );
    304             xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
    305             remote_spinlock_unlock( lock_xp );
    306 
    307326            // unblock client thread
    308327            thread_unblock( client_xp , THREAD_BLOCKED_IO );
     
    343362    chdev_t     * chdev_ptr;
    344363
     364    assert( (file_xp != XPTR_NULL) , __FUNCTION__,
     365    "file_xp == XPTR_NULL\n" );
     366
    345367    // get cluster and local pointer on remote file descriptor
    346368    // associated inode and chdev are stored in same cluster as the file desc.
     
    353375
    354376    assert( (inode_type == INODE_TYPE_DEV) , __FUNCTION__ ,
    355     "inode type %d is not INODE_TYPE_DEV", inode_type );
     377    "inode type %d is not INODE_TYPE_DEV\n", inode_type );
    356378
    357379    // get chdev local pointer from inode extension
  • trunk/kernel/kern/chdev.h

    r428 r440  
    4242 * independant) Channel Device descriptor (in brief "chdev").
    4343 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev
    44  * descriptor for each channel (and for each RX/TX direction for the NIC device).
     44 * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices).
    4545 * Each chdev contains a waiting queue, registering the "client threads" requests,
    4646 * and an associated "server thread", handling these requests.
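
To make the new ordering easier to follow, here is a condensed outline of the client side of chdev_register_command() after this changeset, paraphrased from the chdev.c hunks above (declarations, assertions and debug code are omitted, so treat it as a readability aid rather than the exact source):

    // client side of chdev_register_command(), paraphrased outline
    hal_disable_irq( &save_sr );                          // enter critical section
    remote_spinlock_lock( lock_xp );                      // take lock on chdev waiting queue
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );   // block client
    if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )        // wake the server thread
        thread_unblock( server_xp , THREAD_BLOCKED_IDLE );         // if it was blocked idle
    xlist_add_last( root_xp , list_xp );                  // register client in waiting queue
    if( different ) dev_pic_send_ipi( chdev_cxy , lid );  // IPI when server runs on another core
    remote_spinlock_unlock( lock_xp );                    // release lock
    sched_yield( "client blocked on I/O" );               // deschedule (reason string illustrative)
    hal_restore_irq( save_sr );                           // exit critical section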
  • trunk/kernel/kern/cluster.c

    r438 r440  
    153153#endif
    154154
    155     // initialises RPC fifo
    156         local_fifo_init( &cluster->rpc_fifo );
    157     cluster->rpc_threads = 0;
     155    // initialises RPC FIFOs
     156        for( lid = 0 ; lid < cluster->cores_nr; lid++ )
     157    {
     158            local_fifo_init( &cluster->rpc_fifo[lid] );
     159        cluster->rpc_threads[lid] = 0;
     160    }
    158161
    159162#if( DEBUG_CLUSTER_INIT & 1 )
     
    221224lid_t cluster_select_local_core()
    222225{
    223     uint32_t min = 100;
    224     lid_t    sel = 0;
    225     lid_t    lid;
     226    uint32_t      min = 1000;
     227    lid_t         sel = 0;
     228    uint32_t      nthreads;
     229    lid_t         lid;
     230    scheduler_t * sched;
    226231
    227232    cluster_t * cluster = LOCAL_CLUSTER;
     
    229234    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    230235    {
    231         if( cluster->core_tbl[lid].usage < min )
     236        sched    = &cluster->core_tbl[lid].scheduler;
     237        nthreads = sched->u_threads_nr + sched->k_threads_nr;
     238
     239        if( nthreads < min )
    232240        {
    233             min = cluster->core_tbl[lid].usage;
     241            min = nthreads;
    234242            sel = lid;
    235243        }
     
    323331    bool_t      found;
    324332
     333#if DEBUG_CLUSTER_PID_ALLOC
     334uint32_t cycle = (uint32_t)hal_get_cycles();
     335if( DEBUG_CLUSTER_PID_ALLOC < cycle )
     336printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
     337__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     338#endif
     339
    325340    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
    326341
     
    361376    }
    362377
     378#if DEBUG_CLUSTER_PID_ALLOC
     379cycle = (uint32_t)hal_get_cycles();
     380if( DEBUG_CLUSTER_PID_ALLOC < cycle )
     381printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
     382__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
     383#endif
     384
    363385} // end cluster_pid_alloc()
    364386
     
    366388void cluster_pid_release( pid_t pid )
    367389{
     390
     391#if DEBUG_CLUSTER_PID_RELEASE
     392uint32_t cycle = (uint32_t)hal_get_cycles();
     393if( DEBUG_CLUSTER_PID_RELEASE < cycle )
     394printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
     395__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
     396#endif
     397
    368398    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    369399    lpid_t lpid       = LPID_FROM_PID( pid );
     
    371401    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
    372402
    373     // check pid argument
    374     assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
    375     __FUNCTION__ , "illegal PID" );
     403    // check lpid
     404    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), __FUNCTION__ ,
     405    "illegal LPID = %d" , lpid );
     406
     407    // check owner cluster
     408    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
     409    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
    376410
    377411    // get the process manager lock
     
    384418    // release the processs_manager lock
    385419    spinlock_unlock( &pm->pref_lock );
     420
     421#if DEBUG_CLUSTER_PID_RELEASE
     422cycle = (uint32_t)hal_get_cycles();
     423if( DEBUG_CLUSTER_PID_RELEASE < cycle )
     424printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
     425__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     426#endif
    386427
    387428} // end cluster_pid_release()
  • trunk/kernel/kern/cluster.h

    r438 r440  
    9696typedef struct cluster_s
    9797{
    98         spinlock_t        kcm_lock;          /*! local, protect creation of KCM allocators    */
     98        spinlock_t      kcm_lock;          /*! local, protect creation of KCM allocators      */
    9999
    100100    // global parameters
    101         uint32_t          paddr_width;       /*! numer of bits in physical address            */
    102     uint32_t          x_width;           /*! number of bits to code x_size  (can be 0)    */
    103     uint32_t          y_width;           /*! number of bits to code y_size  (can be 0)    */
    104         uint32_t          x_size;            /*! number of clusters in a row    (can be 1)    */
    105         uint32_t          y_size;            /*! number of clusters in a column (can be 1)    */
    106         cxy_t             io_cxy;            /*! io cluster identifier                        */
    107     uint32_t          dqdt_root_level;   /*! index of root node in dqdt_tbl[]             */
    108     uint32_t          nb_txt_channels;   /*! number of TXT channels                       */
    109     uint32_t          nb_nic_channels;   /*! number of NIC channels                       */
    110     uint32_t          nb_ioc_channels;   /*! number of IOC channels                       */
    111     uint32_t          nb_fbf_channels;   /*! number of FBF channels                       */
     101        uint32_t        paddr_width;       /*! numer of bits in physical address              */
     102    uint32_t        x_width;           /*! number of bits to code x_size  (can be 0)      */
     103    uint32_t        y_width;           /*! number of bits to code y_size  (can be 0)      */
     104        uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
     105        uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
     106        cxy_t           io_cxy;            /*! io cluster identifier                          */
     107    uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]               */
     108    uint32_t        nb_txt_channels;   /*! number of TXT channels                         */
     109    uint32_t        nb_nic_channels;   /*! number of NIC channels                         */
     110    uint32_t        nb_ioc_channels;   /*! number of IOC channels                         */
     111    uint32_t        nb_fbf_channels;   /*! number of FBF channels                         */
    112112
    113113    // local parameters
    114         uint32_t          cores_nr;          /*! actual number of cores in cluster            */
    115     uint32_t          ram_size;          /*! physical memory size                         */
    116     uint32_t          ram_base;          /*! physical memory base (local address)         */
    117 
    118         core_t            core_tbl[CONFIG_MAX_LOCAL_CORES];         /*! embedded cores        */
    119 
    120         list_entry_t      dev_root;          /*! root of list of devices in cluster           */
     114        uint32_t        cores_nr;          /*! actual number of cores in cluster              */
     115    uint32_t        ram_size;          /*! physical memory size                           */
     116    uint32_t        ram_base;          /*! physical memory base (local address)           */
     117
     118        core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];    /*! embedded cores               */
     119
     120        list_entry_t    dev_root;          /*! root of list of devices in cluster             */
    121121
    122122    // memory allocators
    123         ppm_t             ppm;               /*! embedded kernel page manager                 */
    124         khm_t             khm;               /*! embedded kernel heap manager                 */
    125         kcm_t             kcm;               /*! embedded kernel cache manager (for KCMs)     */
    126 
    127     kcm_t           * kcm_tbl[KMEM_TYPES_NR];         /*! pointers on allocated KCMs      */
     123        ppm_t           ppm;               /*! embedded kernel page manager                   */
     124        khm_t           khm;               /*! embedded kernel heap manager                   */
     125        kcm_t           kcm;               /*! embedded kernel KCMs manager                   */
     126
     127    kcm_t         * kcm_tbl[KMEM_TYPES_NR];              /*! pointers on allocated KCMs   */
    128128
    129129    // RPC
    130         remote_fifo_t     rpc_fifo;          /*! RPC fifo (one per cluster)                   */
    131     uint32_t          rpc_threads;       /*! current number of RPC threads in cluster     */
     130        remote_fifo_t   rpc_fifo[CONFIG_MAX_LOCAL_CORES];    /*! one RPC FIFO per core        */
     131    uint32_t        rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core         */
    132132
    133133    // DQDT
    134         dqdt_node_t       dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster */
     134        dqdt_node_t     dqdt_tbl[CONFIG_DQDT_LEVELS_NR];     /*! embedded DQDT nodes          */
    135135
    136136    // Local process manager
    137     pmgr_t            pmgr;            /*! embedded process manager                       */
    138 
    139     void            * pic_extend;      /*! PIC implementation specific extension          */
     137    pmgr_t          pmgr;              /*! embedded process manager                       */
     138
     139    void          * pic_extend;        /*! PIC implementation specific extension          */
    140140}
    141141cluster_t;
  • trunk/kernel/kern/kernel_init.c

    r438 r440  
    12381238    dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
    12391239
     1240#if DEBUG_KERNEL_INIT
     1241printk("\n[DBG] %s : thread %x on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
     1242__FUNCTION__ , CURRENT_THREAD , local_cxy , core_lid , (uint32_t)hal_get_cycles() );
     1243#endif
     1244
    12401245    // each core jump to thread_idle_func
    12411246    thread_idle_func();
  • trunk/kernel/kern/process.c

    r438 r440  
    106106    char        rx_path[40];
    107107    char        tx_path[40];
     108    xptr_t      file_xp;
    108109    xptr_t      chdev_xp;
    109110    chdev_t *   chdev_ptr;
     
    179180        assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" );
    180181
     182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     183cycle = (uint32_t)hal_get_cycles();
     184if( DEBUG_PROCESS_REFERENCE_INIT )
     185printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n",
     186__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     187#endif
     188
    181189        // create stdout pseudo file         
    182190        error = vfs_open( process,
     
    190198        assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" );
    191199
     200#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     201cycle = (uint32_t)hal_get_cycles();
     202if( DEBUG_PROCESS_REFERENCE_INIT )
     203printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n",
     204__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     205#endif
     206
    192207        // create stderr pseudo file         
    193208        error = vfs_open( process,
     
    201216        assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" );
    202217
     218#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     219cycle = (uint32_t)hal_get_cycles();
     220if( DEBUG_PROCESS_REFERENCE_INIT )
     221printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n",
     222__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     223#endif
     224
    203225    }
    204226    else                                            // normal user process
    205227    {
     228        // get extended pointer on stdin pseudo file in model process
     229        file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) );
     230
    206231        // get extended pointer on model process TXT chdev
    207         chdev_xp = chdev_from_file( model_ptr->fd_array.array[0] );
     232        chdev_xp = chdev_from_file( file_xp );
    208233 
    209234        // get cluster and local pointer on chdev
     
    374399uint32_t cycle = (uint32_t)hal_get_cycles();
    375400if( DEBUG_PROCESS_DESTROY )
    376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
    377 __FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
     401printk("\n[DBG] %s : thread %x enter in cluster %x / pid %x / process %x / cycle %d\n",
     402__FUNCTION__ , CURRENT_THREAD , pid , process , cycle );
    378403#endif
    379404
     
    401426    }
    402427
    403     // release the process PID to cluster manager
    404     cluster_pid_release( pid );
     428    // release the process PID to cluster manager if owner cluster
     429    if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );
    405430
    406431    // FIXME close all open files and update dirty [AG]
     
    507532    XLIST_FOREACH( root_xp , iter_xp )
    508533    {
     534        // atomically increment responses counter
     535        hal_atomic_add( (void *)&rpc.responses , 1 );
     536
     537        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     538        process_cxy = GET_CXY( process_xp );
    509539
    510540#if DEBUG_PROCESS_SIGACTION
     
    513543__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
    514544#endif
    515         // atomically increment responses counter
    516         hal_atomic_add( (void *)&rpc.responses , 1 );
    517 
    518         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    519         process_cxy = GET_CXY( process_xp );
    520 
    521545        // call RPC in target cluster
    522546        rpc_process_sigaction_client( process_cxy , &rpc );
     
    529553    hal_restore_irq( save_sr);
    530554
    531     // client deschedule : will be unblocked by the last RPC server thread
     555    // client thread deschedule : will be unblocked by the last RPC server thread
    532556    sched_yield("blocked on rpc_process_sigaction");
    533557
     
    542566
    543567/////////////////////////////////////////////////
    544 void process_block_threads( process_t * process )
     568void process_block_threads( process_t * process,
     569                            xptr_t      client_xp )
    545570{
    546571    thread_t          * target;         // pointer on target thread
     
    567592    spinlock_lock( &process->th_lock );
    568593
    569     // loop to block all threads but the main thread
     594    // loop on target process local threads
    570595    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    571596    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
     
    577602            count++;
    578603
    579             // main thread should not be deleted
    580             if( (ltid != 0) || (owner_cxy != local_cxy) )
     604            // main thread and client thread should not be blocked
     605            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
     606                (client_xp) != XPTR( local_cxy , target ) )          // not client thread
    581607            {
    582608                // set the global blocked bit in target thread descriptor.
     
    626652}  // end process_block_threads()
    627653
    628 ///////////////////////////////////////////////////
    629 void process_unblock_threads( process_t * process )
    630 {
    631     thread_t          * target;        // pointer on target thead
     654/////////////////////////////////////////////////
     655void process_delete_threads( process_t * process,
     656                             xptr_t      client_xp )
     657{
    632658    thread_t          * this;          // pointer on calling thread
     659    thread_t          * target;        // local pointer on target thread
     660    xptr_t              target_xp;     // extended pointer on target thread
     661    cxy_t               owner_cxy;     // owner process cluster
    633662    uint32_t            ltid;          // index in process th_tbl
    634     uint32_t            count;         // requests counter
     663    uint32_t            count;         // threads counter
    635664
    636665    // get calling thread pointer
    637666    this = CURRENT_THREAD;
     667
     668    // get target process owner cluster
     669    owner_cxy = CXY_FROM_PID( process->pid );
    638670
    639671#if DEBUG_PROCESS_SIGACTION
     
    647679    spinlock_lock( &process->th_lock );
    648680
    649     // loop on process threads to unblock all threads
     681    // loop on target process local threads                       
    650682    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    651     for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     683    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
    652684    {
    653685        target = process->th_tbl[ltid];
    654686
    655         if( target != NULL )             // thread found
     687        if( target != NULL )    // valid thread 
    656688        {
    657689            count++;
    658 
    659             // reset the global blocked bit in target thread descriptor.
    660             thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
     690            target_xp = XPTR( local_cxy , target );
     691
     692            // main thread and client thread should not be blocked
     693            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
     694                (client_xp) != target_xp )                           // not client thread
     695            {
     696                // mark target thread for delete and block it
     697                thread_delete( target_xp , process->pid , false );   // not forced
     698            }
    661699        }
    662700    }
     
    672710#endif
    673711
    674 }  // end process_unblock_threads()
    675 
    676 //////////////////////////////////////////////////
    677 void process_delete_threads( process_t * process )
    678 {
    679     thread_t          * target;        // pointer on target thread
     712}  // end process_delete_threads()
     713
     714///////////////////////////////////////////////////
     715void process_unblock_threads( process_t * process )
     716{
     717    thread_t          * target;        // pointer on target thead
     718    thread_t          * this;          // pointer on calling thread
    680719    uint32_t            ltid;          // index in process th_tbl
    681     uint32_t            count;         // threads counter
     720    uint32_t            count;         // requests counter
     721
     722    // get calling thread pointer
     723    this = CURRENT_THREAD;
    682724
    683725#if DEBUG_PROCESS_SIGACTION
     
    685727if( DEBUG_PROCESS_SIGACTION < cycle )
    686728printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    687 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     729__FUNCTION__ , this , process->pid , local_cxy , cycle );
    688730#endif
    689731
     
    691733    spinlock_lock( &process->th_lock );
    692734
    693     // loop to set the REQ_DELETE flag on all threads but the main
     735    // loop on process threads to unblock all threads
    694736    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    695     for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
     737    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    696738    {
    697739        target = process->th_tbl[ltid];
    698740
    699         if( target != NULL )
     741        if( target != NULL )             // thread found
    700742        {
    701743            count++;
    702            
    703             thread_kill( XPTR( local_cxy , target ),
    704                          false,                       // is_exit
    705                          true );                      // is_forced
     744
     745            // reset the global blocked bit in target thread descriptor.
     746            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
    706747        }
    707748    }
     
    714755if( DEBUG_PROCESS_SIGACTION < cycle )
    715756printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    716 __FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
    717 #endif
    718 
    719 }  // end process_delete_threads()
     757__FUNCTION__ , this , process->pid , local_cxy , cycle );
     758#endif
     759
     760}  // end process_unblock_threads()
    720761
    721762///////////////////////////////////////////////
     
    749790
    750791    // allocate memory for a new local process descriptor
    751     // and initialise it from reference cluster if required
     792    // and initialise it from reference cluster if not found
    752793    if( !found )
    753794    {
     
    765806        if( error ) return NULL;
    766807    }
     808
     809#if DEBUG_PROCESS_GET_LOCAL_COPY
     810uint32_t cycle = (uint32_t)hal_get_cycles();
     811if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
     812printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",
     813__FUNCTION__ , local_cxy , pid , process_ptr , cycle );
     814#endif
    767815
    768816    return process_ptr;
     
    10321080    // check parent process is the reference process
    10331081    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
    1034 
    1035 printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
    1036 __FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );
    10371082
    10381083    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
  • trunk/kernel/kern/process.h

    r436 r440  
    101101 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated
    102102 *    <sync_lock>, that are dynamically allocated, are only defined in the reference cluster.
    103  * 5) The <children_root>, <children_nr>, <brothers_list>, and <txt_list> fields are only
     103 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only
    104104 *    defined in the reference cluster, and are undefined in other clusters.
    105105 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    106106 *    are defined in all process descriptors copies.
    107107 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster.
     108 *    The term state format is defined in the shared_syscalls.h file.
    108109 ********************************************************************************************/
    109110
     
    282283 * all threads of a process identified by the <pid> argument, depending on the
    283284 * <action_type> argument.
    284  * WARNING : the DELETE action is NOT executed on the target process main thread
    285  * (thread 0 in process owner cluster).
     285 * WARNING : the DELETE and BLOCK actions are NOT executed on the target process main thread
     286 * (thread 0 in process owner cluster), and not executed on the calling thread itself.
    286287 * It uses the multicast, non blocking rpc_process_sigaction_client() function to send
    287  * parallel requests to all remote clusters containing a process copy.
     288 * parallel requests to all remote clusters containing process copies.
    288289 * Then it blocks and deschedule to wait completion of these parallel requests.
    289290 *
     
    305306
    306307/*********************************************************************************************
    307  * This function blocks all threads - but the main thread - for a given <process>
    308  * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor,
    309  * and request the relevant schedulers to acknowledge the blocking, using IPI if required.
     308 * This function blocks all threads for a given <process> in the local cluster.
     309 * It scans the list of local threads, and sets the THREAD_BLOCKED_GLOBAL bit for all
     310 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
     311 * identified by the <client_xp> argument. It requests the relevant schedulers to acknowledge
     312 * the blocking, using IPI if required, and returns only when all blockable threads
     313 * in cluster are actually blocked.
    310314 * The threads are not detached from the scheduler, and not detached from the local process.
    311  * This function returns only when all blockable threads in cluster are actually blocked.
    312315 *********************************************************************************************
    313316 * @ process     : pointer on the target process descriptor.
    314  ********************************************************************************************/
    315 void process_block_threads( process_t * process );
     317 * @ client_xp   : extended pointer on the client thread that should not be blocked.
     318 ********************************************************************************************/
     319void process_block_threads( process_t * process,
     320                            xptr_t      client_xp );
     321
     322/*********************************************************************************************
     323 * This function marks for deletion all threads for a given <process> in the local cluster.
     324 * It scans the list of local threads, and sets the THREAD_FLAG_REQ_DELETE bit for all
     325 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
     326 * identified by the <client_xp> argument.
     327 * The actual deletion will be done by the scheduler at the next scheduling point.
     328 *********************************************************************************************
     329 * @ process     : pointer on the process descriptor.
     330 * @ client_xp   : extended pointer on the client thread that should not be marked.
     331 ********************************************************************************************/
     332void process_delete_threads( process_t * process,
     333                            xptr_t       client_xp );
    316334
    317335/*********************************************************************************************
     
    321339 ********************************************************************************************/
    322340void process_unblock_threads( process_t * process );
    323 
    324 /*********************************************************************************************
    325  * This function marks for deletion all threads - but the main thread - for a given <process>
    326  * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,
    327  * the following actions will be done by the scheduler at the next scheduling point:
    328  * - the thread will be detached from the scheduler.
    329  * - the thread will be detached from the local process descriptor.
    330  * - the thread will be detached from parent if required.
    331  * - the memory allocated to the thread descriptor is released.
    332  * - the memory allocated to the process descriptor is released, if it is the last thread.
    333  *********************************************************************************************
    334  * @ process     : pointer on the process descriptor.
    335  ********************************************************************************************/
    336 void process_delete_threads( process_t * process );
    337341
    338342/*********************************************************************************************
     
    398402                            struct thread_s ** child_thread_ptr );
    399403
    400 
    401404/********************   File Management Operations   ****************************************/
    402405
  • trunk/kernel/kern/rpc.c

    r438 r440  
    114114    client_core_lid = this->core->lid;
    115115
    116     // select a server_core index:
    117     // use client core index if possible / core 0 otherwise
     116    // select a server_core : use client core index if possible / core 0 otherwise
    118117    if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
    119118    {
     
    133132
    134133    // get local pointer on rpc_fifo in remote cluster,
    135     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     134    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    136135
    137136        // post RPC in remote fifo / deschedule and retry if fifo full
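A condensed sketch of the per-core FIFO selection on the client side, assuming the client_core_lid, server_cxy and cluster variables visible in this hunk:

    // sketch: use the client core index if that core exists in the server cluster,
    // core 0 otherwise, then index the per-core RPC fifo array introduced here
    lid_t server_core_lid = client_core_lid;
    if( client_core_lid >= hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
    {
        server_core_lid = 0;
    }
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];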
     
    231230    core_t        * core     = this->core;
    232231    scheduler_t   * sched    = &core->scheduler;
    233         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     232        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid];
    234233
    235234#if DEBUG_RPC_SERVER_GENERIC
     
    243242        hal_disable_irq( &sr_save );
    244243
    245     // activate (or create) RPC thread if RPC FIFO not empty
      244    // activate (or create) RPC thread if RPC FIFO not empty and no active RPC thread
    246245        if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    247246    {
     
    254253#endif
    255254
    256         // search one IDLE RPC thread  
      255        // search one IDLE RPC thread associated with the selected core  
    257256        list_entry_t * iter;
    258257        LIST_FOREACH( &sched->k_root , iter )
     
    270269        }
    271270
    272         // create new RPC thread if not found   
     271        // create new RPC thread for the selected core if not found   
    273272        if( found == false )                   
    274273        {
     
    277276                                                      &rpc_thread_func,
    278277                                          NULL,
    279                                                       this->core->lid );
    280                 if( error )
    281             {
    282                 assert( false , __FUNCTION__ ,
    283                 "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    284             }
     278                                                      core->lid );
     279                 
     280            assert( (error == 0), __FUNCTION__ ,
     281            "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    285282
    286283            // unblock created RPC thread
    287284            thread->blocked = 0;
    288285
    289             // update core descriptor counter 
    290             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
      286            // update RPC threads counter 
     287            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 );
    291288
    292289#if DEBUG_RPC_SERVER_GENERIC
     
    325322void rpc_thread_func()
    326323{
    327     uint32_t     count;       // handled RPC requests counter
    328     error_t      empty;       // local RPC fifo state
    329     xptr_t       desc_xp;     // extended pointer on RPC request
    330     cxy_t        desc_cxy;    // RPC request cluster (client)
    331     rpc_desc_t * desc_ptr;    // RPC request local pointer
    332     uint32_t     index;       // RPC request index
    333     thread_t   * thread_ptr;  // local pointer on client thread
    334     lid_t        core_lid;    // local index of client core
    335     bool_t       blocking;    // blocking RPC when true
     324    error_t         empty;              // local RPC fifo state
     325    xptr_t          desc_xp;            // extended pointer on RPC request
     326    cxy_t           desc_cxy;           // RPC request cluster (client)
     327    rpc_desc_t    * desc_ptr;           // RPC request local pointer
     328    uint32_t        index;              // RPC request index
     329    thread_t      * client_ptr;         // local pointer on client thread
     330        thread_t      * server_ptr;         // local pointer on server thread
     331    xptr_t          server_xp;          // extended pointer on server thread
     332    lid_t           client_core_lid;    // local index of client core
     333    lid_t           server_core_lid;    // local index of server core
     334    bool_t          blocking;           // blocking RPC when true
     335        remote_fifo_t * rpc_fifo;           // local pointer on RPC fifo
    336336 
    337337    // makes RPC thread not preemptable
    338338        hal_disable_irq( NULL );
    339339 
    340         thread_t      * this     = CURRENT_THREAD;
    341         remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     340        server_ptr      = CURRENT_THREAD;
     341    server_xp       = XPTR( local_cxy , server_ptr );
     342    server_core_lid = server_ptr->core->lid;
     343        rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
    342344
    343345    // two embedded loops:
    344346    // - external loop : "infinite" RPC thread
    345     // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
     347    // - internal loop : handle one RPC request per iteration
    346348 
    347349        while(1)  // infinite loop
    348350        {
    349351        // try to take RPC_FIFO ownership
    350         if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
     352        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
    351353        {
    352354
     
    355357if( DEBUG_RPC_SERVER_GENERIC < cycle )
    356358printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    357 __FUNCTION__, this, local_cxy, cycle );
    358 #endif
    359             // initializes RPC requests counter
    360             count = 0;
    361 
    362                     // exit internal loop in three cases:
    363             // - RPC fifo is empty
    364             // - ownership has been lost (because descheduling)
    365             // - max number of RPCs is reached
    366                 while( 1 )  // internal loop
     359__FUNCTION__, server_ptr, local_cxy, cycle );
     360#endif
     361                while( 1 )  //  one RPC request per iteration
    367362            {
    368363                    empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
    369364
    370                     if ( empty == 0 ) // one RPC request found
     365                // exit when FIFO empty or FIFO ownership lost (in case of descheduling)
     366                    if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
    371367                {
    372368                    // get client cluster and pointer on RPC descriptor
     
    381377if( DEBUG_RPC_SERVER_GENERIC < cycle )
    382378printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
    383 __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
     379__FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr );
    384380#endif
    385381                    // call the relevant server function
     
    390386if( DEBUG_RPC_SERVER_GENERIC < cycle )
    391387printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    392 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
    393 #endif
    394                     // increment handled RPCs counter
    395                         count++;
    396 
     388__FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );
     389#endif
    397390                    // decrement response counter in RPC descriptor if blocking
    398391                    if( blocking )
     
    402395
    403396                        // get client thread pointer and client core lid from RPC descriptor
    404                         thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
    405                         core_lid  = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
     397                        client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     398                        client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    406399
    407400                        // unblock client thread
    408                         thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );
     401                        thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
    409402
    410403                        hal_fence();
     
    414407if( DEBUG_RPC_SERVER_GENERIC < cycle )
    415408printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
    416 __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
     409__FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle );
    417410#endif
    418411                        // send IPI to client core
    419                             dev_pic_send_ipi( desc_cxy , core_lid );
     412                            dev_pic_send_ipi( desc_cxy , client_core_lid );
    420413                    }
    421414                        }
    422        
    423                 // chek exit condition
    424                         if( local_fifo_is_empty( rpc_fifo )  ||
    425                     (rpc_fifo->owner != this->trdid) ||
    426                     (count >= CONFIG_RPC_PENDING_MAX) ) break;
     415                else
     416                {
     417                    break;
     418                }
    427419                } // end internal loop
    428420
    429421            // release rpc_fifo ownership if not lost
    430             if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
     422            if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;
    431423
    432424        }  // end if RPC fifo
    433425
    434         // sucide if too many RPC threads in cluster
    435         if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
     426        // RPC thread blocks on IDLE
     427        thread_block( server_xp , THREAD_BLOCKED_IDLE );
     428
      429        // suicide if too many RPC threads / simply deschedule otherwise
     430        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
    436431            {
    437432
     
    440435if( DEBUG_RPC_SERVER_GENERIC < cycle )
    441436printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    442 __FUNCTION__, this, local_cxy, cycle );
     437__FUNCTION__, server_ptr, local_cxy, cycle );
    443438#endif
    444439            // update RPC threads counter
    445440                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
    446441
    447             // suicide
    448                 thread_kill( XPTR( local_cxy , this ),
    449                          true,                      // is_exit
    450                          true );                    // is forced
     442            // RPC thread blocks on GLOBAL
     443                thread_block( server_xp , THREAD_BLOCKED_GLOBAL );
     444
      445            // RPC thread sets the REQ_DELETE flag to suicide
     446            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
    451447            }
     448        else
     449        {
    452450
    453451#if DEBUG_RPC_SERVER_GENERIC
    454452uint32_t cycle = (uint32_t)hal_get_cycles();
    455453if( DEBUG_RPC_SERVER_GENERIC < cycle )
    456 printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
    457 __FUNCTION__, this, local_cxy, cycle );
    458 #endif
    459 
    460         // Block and deschedule
    461         thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
    462         sched_yield("RPC fifo empty or too much work");
    463 
    464 #if DEBUG_RPC_SERVER_GENERIC
    465 cycle = (uint32_t)hal_get_cycles();
    466 if( DEBUG_RPC_SERVER_GENERIC < cycle )
    467 printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
    468 __FUNCTION__, this, local_cxy, cycle );
    469 #endif
      454printk("\n[DBG] %s : RPC thread %x in cluster %x blocks & deschedules / cycle %d\n",
     455__FUNCTION__, server_ptr, local_cxy, cycle );
     456#endif
     457
     458            // RPC thread deschedules
     459            assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" );
     460            sched_yield("RPC fifo empty");
     461        }
    470462
    471463        } // end infinite loop
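The reworked rpc_thread_func() control flow can be summarised by the following sketch, using the names from the hunks above (DEBUG instrumentation and the RPC threads counter update are omitted):

    while( 1 )                                                              // infinite server loop
    {
        if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )   // take FIFO ownership
        {
            while( 1 )                                                      // one RPC request per iteration
            {
                empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );

                if( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
                {
                    // call the relevant server function,
                    // and unblock the client thread if the RPC is blocking
                }
                else break;                                                 // FIFO empty or ownership lost
            }
            if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0; // release ownership
        }

        thread_block( server_xp , THREAD_BLOCKED_IDLE );                    // always block on IDLE

        if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
        {
            thread_block( server_xp , THREAD_BLOCKED_GLOBAL );              // too many RPC threads:
            hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );     // request self deletion
        }
        else
        {
            sched_yield( "RPC fifo empty" );                                // otherwise simply deschedule
        }
    }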
     
    646638
    647639    // set input arguments in RPC descriptor 
    648     rpc.args[0] = (uint64_t)(intptr_t)ref_process_xp;
    649     rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp;
     640    rpc.args[0] = (uint64_t)ref_process_xp;
     641    rpc.args[1] = (uint64_t)parent_thread_xp;
    650642
    651643    // register RPC request in remote RPC fifo
     
    903895void rpc_process_sigaction_server( xptr_t xp )
    904896{
    905     pid_t        pid;              // target process identifier
    906     process_t  * process;          // pointer on local target process descriptor
    907     uint32_t     action;           // sigaction index
    908     thread_t   * client_thread;    // pointer on client thread in client cluster
    909     cxy_t        client_cxy;       // client cluster identifier
    910     rpc_desc_t * rpc;              // pointer on rpc descriptor in client cluster
    911     xptr_t       count_xp;         // extended pointer on response counter
    912     lid_t        client_lid;       // client core local index
     897    pid_t        pid;             // target process identifier
     898    process_t  * process;         // pointer on local target process descriptor
     899    uint32_t     action;          // sigaction index
     900    thread_t   * client_ptr;      // pointer on client thread in client cluster
     901    xptr_t       client_xp;       // extended pointer client thread
     902    cxy_t        client_cxy;      // client cluster identifier
     903    rpc_desc_t * rpc;             // pointer on rpc descriptor in client cluster
     904    xptr_t       count_xp;        // extended pointer on responses counter
     905    uint32_t     count_value;     // responses counter value
     906    lid_t        client_lid;      // client core local index
    913907
    914908    // get client cluster identifier and pointer on RPC descriptor
     
    927921#endif
    928922
     923    // get client thread pointers
     924    client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
     925    client_xp  = XPTR( client_cxy , client_ptr );
     926
    929927    // get local process descriptor
    930928    process = cluster_get_local_process_from_pid( pid );
    931929
    932930    // call relevant kernel function
    933     if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
    934     else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
     931    if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
     932    else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
    935933    else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
    936934
     
    939937
    940938    // decrement the responses counter in RPC descriptor,
     939    count_value = hal_remote_atomic_add( count_xp , -1 );
     940
    941941    // unblock the client thread only if it is the last response.
    942     if( hal_remote_atomic_add( count_xp , -1 ) == 1 )
     942    if( count_value == 1 )
    943943    {
    944         // get client thread pointer and client core lid
    945         client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
     944        // get client core lid
    946945        client_lid    = (lid_t)     hal_remote_lw ( XPTR( client_cxy , &rpc->lid    ) );
    947946
    948         thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC );
     947        // unblock client thread
     948        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
     949
     950        // send an IPI to client core
    949951        dev_pic_send_ipi( client_cxy , client_lid );
    950952    }
     
    11921194                                    vfs_dentry_t * dentry )
    11931195{
     1196#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1197uint32_t cycle = (uint32_t)hal_get_cycles();
     1198if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1199printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1200__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1201#endif
     1202
    11941203    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11951204
     
    12061215    rpc_send( cxy , &rpc );
    12071216
     1217#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1218cycle = (uint32_t)hal_get_cycles();
     1219if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1220printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1221__FUNCTION__ , CURRENT_THREAD , cycle );
     1222#endif
    12081223}
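The same enter/exit instrumentation is added to several RPC client and server functions in this changeset. The generic pattern, with DEBUG_RPC_XXX standing for the per-function threshold defined in kernel_config.h:

#if DEBUG_RPC_XXX
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_XXX )
printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
#endif

    // ... body of the client or server function ...

#if DEBUG_RPC_XXX
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_RPC_XXX )
printk("\n[DBG] %s : thread %x exit / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , cycle );
#endif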
    12091224
     
    12111226void rpc_vfs_dentry_destroy_server( xptr_t xp )
    12121227{
     1228#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1229uint32_t cycle = (uint32_t)hal_get_cycles();
     1230if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1231printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1232__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1233#endif
     1234
    12131235    vfs_dentry_t * dentry;
    12141236
     
    12231245    vfs_dentry_destroy( dentry );
    12241246
     1247#if DEBUG_RPC_VFS_DENTRY_DESTROY
     1248cycle = (uint32_t)hal_get_cycles();
     1249if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1250printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1251__FUNCTION__ , CURRENT_THREAD , cycle );
     1252#endif
    12251253}
    12261254
     
    13191347                                  vfs_file_t * file )
    13201348{
     1349#if DEBUG_RPC_VFS_FILE_DESTROY
     1350uint32_t cycle = (uint32_t)hal_get_cycles();
     1351if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1352printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1353__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1354#endif
     1355
    13211356    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    13221357
     
    13331368    rpc_send( cxy , &rpc );
    13341369
     1370#if DEBUG_RPC_VFS_FILE_DESTROY
     1371cycle = (uint32_t)hal_get_cycles();
     1372if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1373printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1374__FUNCTION__ , CURRENT_THREAD , cycle );
     1375#endif
    13351376}
    13361377
     
    13381379void rpc_vfs_file_destroy_server( xptr_t xp )
    13391380{
     1381#if DEBUG_RPC_VFS_FILE_DESTROY
     1382uint32_t cycle = (uint32_t)hal_get_cycles();
     1383if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1384printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1385__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1386#endif
     1387
    13401388    vfs_file_t * file;
    13411389
     
    13501398    vfs_file_destroy( file );
    13511399
     1400#if DEBUG_RPC_VFS_FILE_DESTROY
     1401cycle = (uint32_t)hal_get_cycles();
     1402if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1403printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1404__FUNCTION__ , CURRENT_THREAD , cycle );
     1405#endif
    13521406}
    13531407
     
    15361590                              error_t   * error )      // out
    15371591{
     1592#if DEBUG_RPC_VMM_GET_VSEG
     1593uint32_t cycle = (uint32_t)hal_get_cycles();
     1594if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1595printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1596__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1597#endif
     1598
    15381599    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    15391600
     
    15551616    *error   = (error_t)rpc.args[3];
    15561617
     1618#if DEBUG_RPC_VMM_GET_VSEG
     1619cycle = (uint32_t)hal_get_cycles();
     1620if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1621printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1622__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1623#endif
    15571624}
    15581625
     
    15601627void rpc_vmm_get_vseg_server( xptr_t xp )
    15611628{
     1629#if DEBUG_RPC_VMM_GET_VSEG
     1630uint32_t cycle = (uint32_t)hal_get_cycles();
     1631if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1632printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1633__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1634#endif
     1635
    15621636    process_t   * process;
    15631637    intptr_t      vaddr;
     
    15821656    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    15831657
    1584 }
    1585 
    1586 
    1587 /////////////////////////////////////////////////////////////////////////////////////////
    1588 // [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
     1658#if DEBUG_RPC_VMM_GET_VSEG
     1659cycle = (uint32_t)hal_get_cycles();
     1660if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1661printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1662__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1663#endif
     1664}
     1665
     1666
     1667/////////////////////////////////////////////////////////////////////////////////////////
     1668// [21]          Marshaling functions attached to RPC_VMM_GET_VSEG  (blocking)
    15891669/////////////////////////////////////////////////////////////////////////////////////////
    15901670
     
    15981678                             error_t   * error )   // out
    15991679{
    1600     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    1601 
    1602     // initialise RPC descriptor header
    1603     rpc_desc_t  rpc;
    1604     rpc.index    = RPC_VMM_GET_PTE;
     1680#if DEBUG_RPC_VMM_GET_PTE
     1681uint32_t cycle = (uint32_t)hal_get_cycles();
     1682if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1683printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1684__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1685#endif
     1686
     1687    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     1688
     1689    // initialise RPC descriptor header
     1690    rpc_desc_t  rpc;
     1691    rpc.index    = RPC_VMM_GET_VSEG;
    16051692    rpc.blocking = true;
    16061693    rpc.responses = 1;
     
    16191706    *error = (error_t)rpc.args[5];
    16201707
     1708#if DEBUG_RPC_VMM_GET_PTE
     1709cycle = (uint32_t)hal_get_cycles();
     1710if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1711printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1712__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1713#endif
    16211714}
    16221715
     
    16241717void rpc_vmm_get_pte_server( xptr_t xp )
    16251718{
     1719#if DEBUG_RPC_VMM_GET_PTE
     1720uint32_t cycle = (uint32_t)hal_get_cycles();
     1721if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1722printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1723__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1724#endif
     1725
    16261726    process_t   * process;
    16271727    vpn_t         vpn;
     
    16481748    hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    16491749
     1750#if DEBUG_RPC_VMM_GET_PTE
     1751cycle = (uint32_t)hal_get_cycles();
     1752if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1753printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1754__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1755#endif
    16501756}
    16511757
  • trunk/kernel/kern/scheduler.c

    r438 r440  
    125125            thread = LIST_ELEMENT( current , thread_t , sched_list );
    126126
    127             // execute RPC thread if non blocked
    128             if( (thread->blocked == 0)  &&
    129                 (thread->type == THREAD_RPC) )
    130             {
    131                 spinlock_unlock( &sched->lock );
    132                 return thread;
    133             }
    134 
    135             // execute DEV thread if non blocked and waiting queue non empty
    136             if( (thread->blocked == 0)  &&
    137                 (thread->type == THREAD_DEV) &&
    138                 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
     127            // select kernel thread if non blocked and non IDLE
     128            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
    139129            {
    140130                spinlock_unlock( &sched->lock );
     
    186176
    187177    list_entry_t * iter;
     178    list_entry_t * root;
    188179    thread_t     * thread;
    189180    process_t    * process;
    190181
     182    // get pointer on scheduler
    191183    scheduler_t  * sched = &core->scheduler;
     184
     185    // get pointer on user threads root
     186    root = &sched->u_root;
    192187
    193188    // take lock protecting threads lists
    194189    spinlock_lock( &sched->lock );
    195190
      191    // We use a while loop to scan the user threads, in order to control the iterator
      192    // increment, because some threads will be destroyed, and we cannot use LIST_FOREACH()
     193
     194    // initialise list iterator
     195    iter = root->next;
     196
    196197    // scan all user threads
    197     LIST_FOREACH( &sched->u_root , iter )
    198     {
     198    while( iter != root )
     199    {
     200        // get pointer on thread
    199201        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     202
     203        // increment iterator
     204        iter = iter->next;
    200205
    201206        // handle REQ_ACK
     
    219224            process = thread->process;
    220225
     226                // release FPU if required
     227                if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
     228
     229            // remove thread from scheduler (scheduler lock already taken)
     230            uint32_t threads_nr = sched->u_threads_nr;
     231
     232            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
     233
     234            sched->u_threads_nr = threads_nr - 1;
     235            list_unlink( &thread->sched_list );
     236            if( threads_nr == 1 ) sched->u_last = NULL;
     237
     238            // delete thread
     239            thread_destroy( thread );
     240
    221241#if DEBUG_SCHED_HANDLE_SIGNALS
    222242uint32_t cycle = (uint32_t)hal_get_cycles();
    223243if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    224 printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n",
    225 __FUNCTION__ , thread , process->pid , cycle );
    226 #endif
    227                 // release FPU if required
    228                 if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    229 
    230             // detach thread from parent if attached
    231             if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
    232             thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
    233 
    234             // remove thread from scheduler (scheduler lock already taken)
    235             uint32_t threads_nr = sched->u_threads_nr;
    236             assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );
    237             sched->u_threads_nr = threads_nr - 1;
    238             list_unlink( &thread->sched_list );
    239             if( threads_nr == 1 ) sched->u_last = NULL;
    240 
    241             // delete thread
    242             thread_destroy( thread );
    243 
    244 #if DEBUG_SCHED_HANDLE_SIGNALS
    245 cycle = (uint32_t)hal_get_cycles();
    246 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    247 printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
    248 __FUNCTION__ , thread , process->pid , cycle );
      244printk("\n[DBG] %s : thread %x in process %x (%x) deleted / cycle %d\n",
     245__FUNCTION__ , thread , process->pid , process , cycle );
    249246#endif
    250247            // destroy process descriptor if no more threads
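The manual iterator management above implements the classic "save next before unlink" pattern. A condensed sketch, with the REQ_DELETE test assumed (it sits in the elided part of this hunk):

    // scan the user threads while some of them may be unlinked and destroyed
    iter = root->next;
    while( iter != root )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        iter   = iter->next;                                 // saved before any list_unlink()

        if( thread->flags & THREAD_FLAG_REQ_DELETE )         // assumed condition
        {
            if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
            list_unlink( &thread->sched_list );              // scheduler lock already taken
            thread_destroy( thread );
        }
    }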
     
    314311    {
    315312
     313if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) )
     314printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() );
     315
     316if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) )
     317printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() );
     318
    316319#if DEBUG_SCHED_YIELD
    317320uint32_t cycle = (uint32_t)hal_get_cycles();
  • trunk/kernel/kern/thread.c

    r438 r440  
    184184    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    185185
    186     // reset children list
    187     xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    188     thread->children_nr = 0;
    189 
    190     // reset sched list and brothers list
     186    // reset sched list
    191187    list_entry_init( &thread->sched_list );
    192     xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );
    193188
    194189    // reset thread info
     
    238233    // get process descriptor local copy
    239234    process = process_get_local_copy( pid );
     235
    240236    if( process == NULL )
    241237    {
     
    604600///////////////////////////////////////////////////////////////////////////////////////
    605601// TODO: check that all memory dynamically allocated during thread execution
    606 // has been released, using a cache of mmap and malloc requests. [AG]
     602// has been released, using a cache of mmap requests. [AG]
    607603///////////////////////////////////////////////////////////////////////////////////////
    608604void thread_destroy( thread_t * thread )
     
    619615__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
    620616#endif
    621 
    622     assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
    623617
    624618    assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );
     
    663657}   // end thread_destroy()
    664658
    665 /////////////////////////////////////////////////
    666 void thread_child_parent_link( xptr_t  xp_parent,
    667                                xptr_t  xp_child )
    668 {
    669     // get extended pointers on children list root
    670     cxy_t      parent_cxy = GET_CXY( xp_parent );
    671     thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    672     xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );
    673 
    674     // get extended pointer on children list entry
    675     cxy_t      child_cxy  = GET_CXY( xp_child );
    676     thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    677     xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
    678 
    679     // set the link
    680     xlist_add_first( root , entry );
    681     hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
    682 
    683 }  // end thread_child_parent_link()
    684 
    685 ///////////////////////////////////////////////////
    686 void thread_child_parent_unlink( xptr_t  xp_parent,
    687                                  xptr_t  xp_child )
    688 {
    689     // get extended pointer on children list lock
    690     cxy_t      parent_cxy = GET_CXY( xp_parent );
    691     thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    692     xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );
    693 
    694     // get extended pointer on children list entry
    695     cxy_t      child_cxy  = GET_CXY( xp_child );
    696     thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    697     xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
    698 
    699     // get the lock
    700     remote_spinlock_lock( lock );
    701 
    702     // remove the link
    703     xlist_unlink( entry );
    704     hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
    705 
    706     // release the lock
    707     remote_spinlock_unlock( lock );
    708 
    709 }  // thread_child_parent_unlink()
    710 
    711659//////////////////////////////////////////////////
    712660inline void thread_set_req_ack( thread_t * target,
     
    846794
    847795}  // end thread_unblock()
     796
     797/*
    848798
    849799////////////////////////////////////
     
    875825    process_t * target_process;         // pointer on target thread process
    876826
    877     // get target thread cluster and pointer
     827    // get target thread pointer and cluster
    878828    target_cxy = GET_CXY( target_xp );
    879829    target_ptr = GET_PTR( target_xp );
     
    883833    killer_xp  = XPTR( local_cxy , killer_ptr );
    884834
    885 #if DEBUG_THREAD_KILL
     835#if DEBUG_THREAD_DELETE
    886836uint32_t cycle  = (uint32_t)hal_get_cycles;
    887 if( DEBUG_THREAD_KILL < cycle )
     837if( DEBUG_THREAD_DELETE < cycle )
    888838printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
    889839__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    982932        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
    983933
    984 #if DEBUG_THREAD_KILL
     934#if DEBUG_THREAD_DELETE
    985935cycle  = (uint32_t)hal_get_cycles;
    986 if( DEBUG_THREAD_KILL < cycle )
     936if( DEBUG_THREAD_DELETE < cycle )
    987937printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
    988938__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    995945        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    996946
    997 #if DEBUG_THREAD_KILL
     947#if DEBUG_THREAD_DELETE
    998948cycle  = (uint32_t)hal_get_cycles;
    999 if( DEBUG_THREAD_KILL < cycle )
     949if( DEBUG_THREAD_DELETE < cycle )
    1000950printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
    1001951__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    1005955
    1006956}  // end thread_kill()
     957
     958*/
     959
     960//////////////////////////////////////
     961void thread_delete( xptr_t  target_xp,
     962                    pid_t   pid,
     963                    bool_t  is_forced )
     964{
     965    reg_t       save_sr;                // for critical section
     966    bool_t      target_join_done;       // joining thread arrived first
     967    bool_t      target_attached;        // target thread attached
     968    xptr_t      killer_xp;              // extended pointer on killer thread (this)
     969    thread_t  * killer_ptr;             // pointer on killer thread (this)
     970    cxy_t       target_cxy;             // target thread cluster     
     971    thread_t  * target_ptr;             // pointer on target thread
     972    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
     973    uint32_t    target_flags;           // target thread <flags> value
     974    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
     975    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
     976    trdid_t     target_trdid;           // target thread identifier
     977    ltid_t      target_ltid;            // target thread local index
     978    xptr_t      joining_xp;             // extended pointer on joining thread
     979    thread_t  * joining_ptr;            // pointer on joining thread
     980    cxy_t       joining_cxy;            // joining thread cluster
     981    cxy_t       owner_cxy;              // process owner cluster
     982
     983
     984    // get target thread pointers, identifiers, and flags
     985    target_cxy      = GET_CXY( target_xp );
     986    target_ptr      = GET_PTR( target_xp );
     987    target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
     988    target_ltid     = LTID_FROM_TRDID( target_trdid );
     989    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
     990    target_flags    = hal_remote_lw( target_flags_xp );
     991
     992    // get killer thread pointers
     993    killer_ptr = CURRENT_THREAD;
     994    killer_xp  = XPTR( local_cxy , killer_ptr );
     995
     996#if DEBUG_THREAD_DELETE
      997uint32_t cycle  = (uint32_t)hal_get_cycles();
     998if( DEBUG_THREAD_DELETE < cycle )
     999printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
     1000__FUNCTION__, killer_ptr, target_ptr, cycle );
     1001#endif
     1002
     1003    // target thread cannot be the main thread, because the main thread
     1004    // must be deleted by the parent process sys_wait() function
     1005    owner_cxy = CXY_FROM_PID( pid );
     1006    assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,
     1007    "tharget thread cannot be the main thread\n" );
     1008
     1009    // block the target thread
     1010    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     1011
     1012    // get attached from target flag descriptor
     1013    target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
     1014
     1015    // synchronize with the joining thread if the target thread is attached
     1016    if( target_attached && (is_forced == false) )
     1017    {
     1018        // build extended pointers on target thread join fields
     1019        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     1020        target_join_xp_xp    = XPTR( target_cxy , &target_ptr->join_xp );
     1021
     1022        // enter critical section
     1023        hal_disable_irq( &save_sr );
     1024
     1025        // take the join_lock in target thread descriptor
     1026        remote_spinlock_lock( target_join_lock_xp );
     1027
     1028        // get join_done from target thread descriptor
     1029        target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
     1030   
     1031        if( target_join_done )  // joining thread arrived first => unblock the joining thread
     1032        {
     1033            // get extended pointer on joining thread
     1034            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     1035            joining_ptr = GET_PTR( joining_xp );
     1036            joining_cxy = GET_CXY( joining_xp );
     1037           
     1038            // reset the join_done flag in target thread
     1039            hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
     1040
     1041            // unblock the joining thread
     1042            thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
     1043
     1044            // release the join_lock in target thread descriptor
     1045            remote_spinlock_unlock( target_join_lock_xp );
     1046
     1047            // restore IRQs
     1048            hal_restore_irq( save_sr );
     1049        }
     1050        else                // this thread arrived first => register flags and deschedule
     1051        {
     1052            // set the kill_done flag in target thread
     1053            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     1054
     1055            // block this thread on BLOCKED_JOIN
     1056            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
     1057
     1058            // set extended pointer on killer thread in target thread
     1059            hal_remote_swd( target_join_xp_xp , killer_xp );
     1060
     1061            // release the join_lock in target thread descriptor
     1062            remote_spinlock_unlock( target_join_lock_xp );
     1063
     1064            // deschedule
     1065            sched_yield( "killer thread wait joining thread" );
     1066
     1067            // restore IRQs
     1068            hal_restore_irq( save_sr );
     1069        }
     1070    }  // end if attached
     1071
     1072    // set the REQ_DELETE flag in target thread descriptor
     1073    hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1074
     1075#if DEBUG_THREAD_DELETE
      1076cycle  = (uint32_t)hal_get_cycles();
     1077if( DEBUG_THREAD_DELETE < cycle )
     1078printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
     1079__FUNCTION__, killer_ptr, target_ptr, cycle );
     1080#endif
     1081
     1082}  // end thread_delete()
     1083
     1084
    10071085
    10081086///////////////////////
  • trunk/kernel/kern/thread.h

    r438 r440  
    171171    cxy_t               fork_cxy;        /*! target cluster  for next fork()          */
    172172
    173         xlist_entry_t       children_root;   /*! root of list of attached children        */
    174     uint32_t            children_nr;     /*! number of attached children threads      */
    175     remote_spinlock_t * children_lock;   /*! lock protecting the children list        */
    176 
    177     xlist_entry_t       brothers_list;   /*! list of attached threads to same parent  */
    178 
    179173        list_entry_t        sched_list;      /*! member of threads attached to same core  */
    180174
     
    222216 * in an existing process. It allocates memory for an user thread descriptor in the
    223217 * local cluster, and initializes it from information contained in the arguments.
    224  * The CPU context is initialized from scratch. If required by the <attr> argument,
    225  * the new thread is attached to the core specified in <attr>.
     218 * The CPU context is initialized from scratch.
    226219 * It is registered in the local process descriptor specified by the <pid> argument.
    227  * The thread descriptor pointer is returned to allow the parent thread to register it
    228  * in its children list.
    229220 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start.
    230221 ***************************************************************************************
     
    325316
    326317/***************************************************************************************
    327  * This function registers a child thread in the global list of attached
    328  * children threads of a parent thread.
    329  * It does NOT take a lock, as this function is always called by the parent thread.
    330  ***************************************************************************************
    331  * @ parent_xp : extended pointer on the parent thread descriptor.
    332  * @ child_xp  : extended pointer on the child thread descriptor.
    333  **************************************************************************************/
    334 void thread_child_parent_link( xptr_t  parent_xp,
    335                                xptr_t  child_xp );
    336 
    337 /***************************************************************************************
    338  * This function removes an user thread from the parent thread global list
    339  * of attached children threads.
    340  ***************************************************************************************
    341  * @ parent_xp : extended pointer on the parent thread descriptor.
    342  * @ child_xp  : extended pointer on the child thread descriptor.
    343  **************************************************************************************/
    344 void thread_child_parent_unlink( xptr_t parent_xp,
    345                                  xptr_t child_xp );
    346 
    347 /***************************************************************************************
    348318 * This function is used by a "blocker" thread running in the same cluster as a "target"
    349319 * thread to request the scheduler of the target thread to acknowledge that the target
     
    386356
    387357/***************************************************************************************
    388  * This function is called to handle the four pthread_cancel(), pthread_exit(),
    389  * kill() and exit() system calls. It kills a "target" thread identified by the
    390  * <thread_xp> argument. The "killer" thread can be the "target" thread, when the
    391  * <is_exit> argument is true. The "killer" thread can run in any cluster,
    392  * as it uses remote accesses.
    393  * If the "target" thread is running in "attached" mode, and the <is_forced> argument
     358 * This function is used by the four sys_thread_cancel(), sys_thread_exit(),
     359 * sys_kill() and sys_exit() system calls to delete a given thread.
      360 * It sets the THREAD_BLOCKED_GLOBAL bit and the THREAD_FLAG_REQ_DELETE bit
     361 * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler
     362 * to asynchronously delete the target thread, at the next scheduling point.
     363 * The calling thread can run in any cluster, as it uses remote accesses, but
      364 * the target thread cannot be the main thread of the process identified by the <pid>
      365 * argument, because the main thread must be deleted by the parent process sys_wait() function.
     366 * If the target thread is running in "attached" mode, and the <is_forced> argument
     394367 * is false, this function implements the required synchronisation with the joining
    395  * thread, blocking the "killer" thread until the pthread_join() syscall is executed.
    396  * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit
    397  * and the THREAD BLOCKED_GLOBAL bit in the target thread, and the actual destruction
    398  * is asynchronously done by the scheduler at the next scheduling point.
     368 * thread, blocking the calling thread until the pthread_join() syscall is executed.
    399369 ***************************************************************************************
    400370 * @ thread_xp   : extended pointer on the target thread.
    401  * @ is_exit     : the killer thread is the target thread itself.
    402  * @ is_forced   : the killing does not depends on the attached mode.
    403  **************************************************************************************/
    404 void thread_kill( xptr_t  thread_xp,
    405                   bool_t  is_exit,
    406                   bool_t  is_forced );
     371 * @ pid         : process identifier (to get the owner cluster identifier).
      372 * @ is_forced   : the deletion does not depend on the attached mode.
     373 **************************************************************************************/
     374void thread_delete( xptr_t  thread_xp,
     375                    pid_t   pid,
     376                    bool_t  is_forced );
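A short usage sketch for the new interface (caller context assumed), as invoked from a sys_thread_cancel()-like path:

    // request asynchronous deletion of a target thread; the scheduler performs
    // the actual destruction at the next scheduling point
    thread_delete( XPTR( target_cxy , target_ptr ),
                   process->pid,          // owner cluster is derived from the pid
                   false );               // not forced: synchronise with a possible pthread_join()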
    407377
    408378/***************************************************************************************
  • trunk/kernel/kernel_config.h

    r439 r440  
    3838
    3939
    40 #define CONFIG_DEBUG_CHDEV_CMD_RX             0
    41 #define CONFIG_DEBUG_CHDEV_CMD_TX             0
    42 #define CONFIG_DEBUG_CHDEV_SERVER_RX          0
    43 #define CONFIG_DEBUG_CHDEV_SERVER_TX          0
    44 
    45 #define CONFIG_DEBUG_CLUSTER_INIT             0
    46 #define CONFIG_DEBUG_CLUSTER_PROCESS_COPIES   0
    47 
    48 #define CONFIG_DEBUG_DEV_TXT_RX               0
    49 #define CONFIG_DEBUG_DEV_TXT_TX               0
    50 #define CONFIG_DEBUG_DEV_IOC_RX               0
    51 #define CONFIG_DEBUG_DEV_IOC_TX               0
    52 #define CONFIG_DEBUG_DEV_NIC_RX               0
    53 #define CONFIG_DEBUG_DEV_NIC_RX               0
    54 #define CONFIG_DEBUG_DEV_FBF_RX               0
    55 #define CONFIG_DEBUG_DEV_FBF_TX               0
    56 #define CONFIG_DEBUG_DEV_DMA                  0
    57 #define CONFIG_DEBUG_DEV_MMC                  0
    58 #define CONFIG_DEBUG_DEV_PIC                  0
    59 
    60 #define CONFIG_DEBUG_DEVFS_INIT               0
    61 #define CONFIG_DEBUG_DEVFS_MOVE               0
    62 
    63 #define CONFIG_DEBUG_FATFS_INIT               0
    64 #define CONFIG_DEBUG_FATFS_MOVE               0
    65 #define CONFIG_DEBUG_FATFS_LOAD               0
    66 
    67 #define CONFIG_DEBUG_GPT_ACCESS               0
    68 
    69 #define CONFIG_DEBUG_HAL_KENTRY               0
    70 #define CONFIG_DEBUG_HAL_EXCEPTIONS           0
    71 #define CONFIG_DEBUG_HAL_IRQS                 0       
    72 #define CONFIG_DEBUG_HAL_TXT_RX               0
    73 #define CONFIG_DEBUG_HAL_TXT_TX               0
    74 #define CONFIG_DEBUG_HAL_IOC_RX               0
    75 #define CONFIG_DEBUG_HAL_IOC_TX               0
    76 
    77 #define CONFIG_DEBUG_KCM                      0
    78 #define CONFIG_DEBUG_KMEM                     0
    79 
    80 #define CONFIG_DEBUG_KERNEL_INIT              0
    81 #define CONFIG_DEBUG_KMEM_ALLOC               0
    82 
    83 #define CONFIG_DEBUG_MAPPER_GET_PAGE          0
    84 #define CONFIG_DEBUG_MAPPER_MOVE_USER         0
    85 #define CONFIG_DEBUG_MAPPER_MOVE_KERNEL       0
    86 
    87 #define CONFIG_DEBUG_PPM_ALLOC_PAGES          0
    88 #define CONFIG_DEBUG_PPM_FREE_PAGES           0
    89 
    90 #define CONFIG_DEBUG_PROCESS_COPY_INIT        0
    91 #define CONFIG_DEBUG_PROCESS_DESTROY          0
    92 #define CONFIG_DEBUG_PROCESS_INIT_CREATE      0
    93 #define CONFIG_DEBUG_PROCESS_MAKE_EXEC        1
    94 #define CONFIG_DEBUG_PROCESS_MAKE_FORK        1
    95 #define CONFIG_DEBUG_PROCESS_REFERENCE_INIT   0
    96 #define CONFIG_DEBUG_PROCESS_SIGACTION        0
    97 #define CONFIG_DEBUG_PROCESS_TXT_ATTACH       0
    98 #define CONFIG_DEBUG_PROCESS_ZERO_CREATE      0
    99 
    100 #define CONFIG_DEBUG_RPC_MARSHALING           0
    101 #define CONFIG_DEBUG_RPC_SEND                 0
    102 #define CONFIG_DEBUG_RPC_SERVER               0
    103 
    104 #define CONFIG_DEBUG_SCHED_HANDLE_SIGNALS     0
    105 #define CONFIG_DEBUG_SCHED_YIELD              0
    106 
    107 #define CONFIG_DEBUG_SYSCALLS_ERROR           2
    108 
    109 #define CONFIG_DEBUG_SYS_DISPLAY              0
    110 #define CONFIG_DEBUG_SYS_EXEC                 1
    111 #define CONFIG_DEBUG_SYS_EXIT                 0
    112 #define CONFIG_DEBUG_SYS_FG                   0
    113 #define CONFIG_DEBUG_SYS_FORK                 1
    114 #define CONFIG_DEBUG_SYS_GET_CONFIG           0
    115 #define CONFIG_DEBUG_SYS_ISATTY               0
    116 #define CONFIG_DEBUG_SYS_KILL                 1
    117 #define CONFIG_DEBUG_SYS_MMAP                 0
    118 #define CONFIG_DEBUG_SYS_READ                 0
    119 #define CONFIG_DEBUG_SYS_THREAD_CANCEL        0
    120 #define CONFIG_DEBUG_SYS_THREAD_EXIT          0
    121 #define CONFIG_DEBUG_SYS_THREAD_JOIN          0
    122 #define CONFIG_DEBUG_SYS_THREAD_SLEEP         0
    123 #define CONFIG_DEBUG_SYS_THREAD_WAKEUP        0
    124 #define CONFIG_DEBUG_SYS_WAIT                 0
    125 #define CONFIG_DEBUG_SYS_WRITE                0
    126 
    127 #define CONFIG_DEBUG_SPINLOCKS                0
    128 #define CONFIG_DEBUG_REMOTE_SPINLOCKS         0
    129 #define CONFIG_DEBUG_RWLOCKS                  0
    130 #define CONFIG_DEBUG_REMOTE_RWLOCKS           0
    131 
    132 #define CONFIG_DEBUG_THREAD_DESTROY           0
    133 #define CONFIG_DEBUG_THREAD_IDLE              0
    134 #define CONFIG_DEBUG_THREAD_KERNEL_CREATE     0
    135 #define CONFIG_DEBUG_THREAD_KILL              0
    136 #define CONFIG_DEBUG_THREAD_USER_CREATE       0
    137 #define CONFIG_DEBUG_THREAD_USER_FORK         0
    138 #define CONFIG_DEBUG_THREAD_BLOCK             0
    139 
    140 #define CONFIG_DEBUG_VFS_INODE_CREATE         0
    141 #define CONFIG_DEBUG_VFS_INODE_LOAD           0
    142 #define CONFIG_DEBUG_VFS_DENTRY_CREATE        0
    143 #define CONFIG_DEBUG_VFS_OPEN                 0
    144 #define CONFIG_DEBUG_VFS_LOOKUP               0
    145 #define CONFIG_DEBUG_VFS_ADD_CHILD            0
    146 #define CONFIG_DEBUG_VFS_MAPPER_MOVE          0
    147 #define CONFIG_DEBUG_VFS_MAPPER_LOAD          0
    148 
    149 #define CONFIG_DEBUG_VMM_CREATE_VSEG          0
    150 #define CONFIG_DEBUG_VMM_DESTROY              0
    151 #define CONFIG_DEBUG_VMM_FORK_COPY            0
    152 #define CONFIG_DEBUG_VMM_GET_ONE_PPN          0
    153 #define CONFIG_DEBUG_VMM_GET_PTE              0
    154 #define CONFIG_DEBUG_VMM_INIT                 0
    155 #define CONFIG_DEBUG_VMM_PAGE_ALLOCATE        0
    156 #define CONFIG_DEBUG_VMM_SET_COW              0
    157 #define CONFIG_DEBUG_VMM_UNMAP_VSEG           0
    158 #define CONFIG_DEBUG_VMM_UPDATE_PTE           0
     40#define DEBUG_CHDEV_CMD_RX             0
     41#define DEBUG_CHDEV_CMD_TX             0
     42#define DEBUG_CHDEV_SERVER_RX          0
     43#define DEBUG_CHDEV_SERVER_TX          0
     44
     45#define DEBUG_CLUSTER_INIT             0
     46#define DEBUG_CLUSTER_PID_ALLOC        0
     47#define DEBUG_CLUSTER_PID_RELEASE      0
     48#define DEBUG_CLUSTER_PROCESS_COPIES   0
     49
     50#define DEBUG_DEV_TXT_RX               0
     51#define DEBUG_DEV_TXT_TX               0
     52#define DEBUG_DEV_IOC_RX               0
     53#define DEBUG_DEV_IOC_TX               0
     54#define DEBUG_DEV_NIC_RX               0
     55#define DEBUG_DEV_NIC_RX               0
     56#define DEBUG_DEV_FBF_RX               0
     57#define DEBUG_DEV_FBF_TX               0
     58#define DEBUG_DEV_DMA                  0
     59#define DEBUG_DEV_MMC                  0
     60#define DEBUG_DEV_PIC                  0
     61
     62#define DEBUG_DEVFS_INIT               0
     63#define DEBUG_DEVFS_MOVE               0
     64
     65#define DEBUG_FATFS_INIT               0
     66#define DEBUG_FATFS_MOVE               0
     67#define DEBUG_FATFS_LOAD               0
     68
     69#define DEBUG_GPT_ACCESS               0
     70
     71#define DEBUG_HAL_KENTRY               0
     72#define DEBUG_HAL_EXCEPTIONS           0
     73#define DEBUG_HAL_IRQS                 0       
     74#define DEBUG_HAL_TXT_RX               0
     75#define DEBUG_HAL_TXT_TX               0
     76#define DEBUG_HAL_IOC_RX               0
     77#define DEBUG_HAL_IOC_TX               0
     78
     79#define DEBUG_KCM                      0
     80#define DEBUG_KMEM                     0
     81
     82#define DEBUG_KERNEL_INIT              0
     83#define DEBUG_KMEM_ALLOC               0
     84
     85#define DEBUG_MAPPER_GET_PAGE          0
     86#define DEBUG_MAPPER_MOVE_USER         0
     87#define DEBUG_MAPPER_MOVE_KERNEL       0
     88
     89#define DEBUG_PPM_ALLOC_PAGES          0
     90#define DEBUG_PPM_FREE_PAGES           0
     91
     92#define DEBUG_PROCESS_COPY_INIT        0
     93#define DEBUG_PROCESS_DESTROY          0
     94#define DEBUG_PROCESS_GET_LOCAL_COPY   0
     95#define DEBUG_PROCESS_INIT_CREATE      0
     96#define DEBUG_PROCESS_MAKE_EXEC        0
     97#define DEBUG_PROCESS_MAKE_FORK        0
     98#define DEBUG_PROCESS_REFERENCE_INIT   0
     99#define DEBUG_PROCESS_SIGACTION        0
     100#define DEBUG_PROCESS_TXT_ATTACH       0
     101#define DEBUG_PROCESS_ZERO_CREATE      0
     102
     103#define DEBUG_RPC_CLIENT_GENERIC       0
     104#define DEBUG_RPC_SERVER_GENERIC       0
     105
     106#define DEBUG_RPC_PMEM_GET_PAGES       0
     107#define DEBUG_RPC_PMEM_RELEASE_PAGES   0
     108#define DEBUG_RPC_PROCESS_MAKE_FORK    0
     109#define DEBUG_RPC_PROCESS_SIGACTION    0
     110#define DEBUG_RPC_VFS_DENTRY_CREATE    0
     111#define DEBUG_RPC_VFS_DENTRY_DESTROY   0
     112#define DEBUG_RPC_VFS_FILE_CREATE      0
     113#define DEBUG_RPC_VFS_FILE_DESTROY     0
     114#define DEBUG_RPC_VMM_GET_PTE          0
     115#define DEBUG_RPC_VMM_GET_VSEG         0
     116
     117#define DEBUG_SCHED_HANDLE_SIGNALS     0
     118#define DEBUG_SCHED_YIELD              0
     119
     120#define DEBUG_SYSCALLS_ERROR           2
     121
     122#define DEBUG_SYS_DISPLAY              0
     123#define DEBUG_SYS_EXEC                 2
     124#define DEBUG_SYS_EXIT                 2
     125#define DEBUG_SYS_FG                   0
     126#define DEBUG_SYS_FORK                 2
     127#define DEBUG_SYS_GET_CONFIG           0
     128#define DEBUG_SYS_ISATTY               0
     129#define DEBUG_SYS_KILL                 2
     130#define DEBUG_SYS_MMAP                 0
     131#define DEBUG_SYS_READ                 0
     132#define DEBUG_SYS_THREAD_CANCEL        0
     133#define DEBUG_SYS_THREAD_CREATE        0
     134#define DEBUG_SYS_THREAD_EXIT          0
     135#define DEBUG_SYS_THREAD_JOIN          0
     136#define DEBUG_SYS_THREAD_SLEEP         0
     137#define DEBUG_SYS_THREAD_WAKEUP        0
     138#define DEBUG_SYS_WAIT                 0
     139#define DEBUG_SYS_WRITE                0
     140
     141#define DEBUG_SPINLOCKS                0
     142#define DEBUG_REMOTE_SPINLOCKS         0
     143#define DEBUG_RWLOCKS                  0
     144#define DEBUG_REMOTE_RWLOCKS           0
     145
     146#define DEBUG_THREAD_DESTROY           0
     147#define DEBUG_THREAD_IDLE              0
     148#define DEBUG_THREAD_KERNEL_CREATE     0
     149#define DEBUG_THREAD_KILL              0
     150#define DEBUG_THREAD_USER_CREATE       0
     151#define DEBUG_THREAD_USER_FORK         0
     152#define DEBUG_THREAD_BLOCK             0
     153
     154#define DEBUG_VFS_INODE_CREATE         0
     155#define DEBUG_VFS_INODE_LOAD           0
     156#define DEBUG_VFS_DENTRY_CREATE        0
     157#define DEBUG_VFS_OPEN                 0
     158#define DEBUG_VFS_LOOKUP               0
     159#define DEBUG_VFS_ADD_CHILD            0
     160#define DEBUG_VFS_MAPPER_MOVE          0
     161#define DEBUG_VFS_MAPPER_LOAD          0
     162
     163#define DEBUG_VMM_CREATE_VSEG          0
     164#define DEBUG_VMM_DESTROY              0
     165#define DEBUG_VMM_FORK_COPY            0
     166#define DEBUG_VMM_GET_ONE_PPN          0
     167#define DEBUG_VMM_GET_PTE              0
     168#define DEBUG_VMM_HANDLE_PAGE_FAULT    0
     169#define DEBUG_VMM_INIT                 0
     170#define DEBUG_VMM_PAGE_ALLOCATE        0
     171#define DEBUG_VMM_SET_COW              0
     172#define DEBUG_VMM_UNMAP_VSEG           0
     173#define DEBUG_VMM_UPDATE_PTE           0
    159174
    160175////////////////////////////////////////////////////////////////////////////////////////////
     
    258273#define CONFIG_REMOTE_FIFO_SLOTS                    16
    259274#define CONFIG_REMOTE_FIFO_MAX_ITERATIONS   1024
    260 
    261 #define CONFIG_RPC_PENDING_MAX              8      // max requests handled by one server
    262 #define CONFIG_RPC_THREADS_MAX              8      // max number of RPC threads per core
     275#define CONFIG_RPC_THREADS_MAX              4      // max number of RPC threads per core
    263276
    264277////////////////////////////////////////////////////////////////////////////////////////////
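
The DEBUG_* values defined in the hunk above are consumed in two ways later in this changeset: as a compile-time switch (#if DEBUG_SYSCALLS_ERROR) and as a start date in cycles for run-time traces (if( DEBUG_X < cycle ) printk(...)). A minimal, self-contained sketch of both patterns, using stdio and a stubbed cycle counter in place of the kernel's printk and hal_get_cycles():

    #include <stdio.h>
    #include <stdint.h>

    #define DEBUG_SYS_EXIT        2    /* 0 = off / otherwise trace after this cycle  */
    #define DEBUG_SYSCALLS_ERROR  2    /* 0 = silent error paths / non-zero = verbose */

    /* stub standing in for the kernel hal_get_cycles() */
    static uint32_t hal_get_cycles( void ) { return 1000; }

    int main( void )
    {
        uint32_t cycle = hal_get_cycles();

    #if DEBUG_SYS_EXIT
        /* run-time threshold : only trace once the start date is reached */
        if( DEBUG_SYS_EXIT < cycle )
            printf("[DBG] sys_exit trace / cycle %u\n", cycle );
    #endif

    #if DEBUG_SYSCALLS_ERROR
        /* compile-time switch : error reports are compiled in only when non-zero */
        printf("[ERROR] example syscall error report\n");
    #endif

        return 0;
    }
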
  • trunk/kernel/libk/list.h

    r437 r440  
    7979 * This macro returns the first element of a rooted double linked list.
    8080 ***************************************************************************
    81  * @ root_ptr : pointer on the list root
     81 * @ root    : pointer on the list root
    8282 * @ type     : type of the linked elements
    8383 * @ member   : name of the list_entry_t field
     
    9090 * This macro returns the last element of a rooted double linked list.
    9191 ***************************************************************************
    92  * @ root_ptr : pointer on the list root
     92 * @ root    : pointer on the list root
    9393 * @ type     : type of the linked elements
    9494 * @ member   : name of the list_entry_t field
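
For context on the two macros whose parameter comments are fixed above: they follow the usual intrusive-list pattern, recovering the enclosing structure from the address of its embedded list_entry_t. The sketch below is self-contained; the macro names (LIST_FIRST / LIST_LAST) and the exact list_entry_t layout are assumptions, since only the comments appear in this hunk, but the (root, type, member) argument convention is the one documented above.

    #include <stdio.h>
    #include <stddef.h>

    /* assumed layout : one forward and one backward pointer */
    typedef struct list_entry_s
    {
        struct list_entry_s * next;
        struct list_entry_s * pred;
    }
    list_entry_t;

    /* assumed names : return the structure embedding the first / last entry */
    #define LIST_FIRST( root , type , member ) \
        ( (type *)( (char *)((root)->next) - offsetof( type , member ) ) )
    #define LIST_LAST( root , type , member )  \
        ( (type *)( (char *)((root)->pred) - offsetof( type , member ) ) )

    typedef struct item_s
    {
        int          value;
        list_entry_t list;      /* the list_entry_t field named by <member> */
    }
    item_t;

    int main( void )
    {
        list_entry_t root;
        item_t a = { .value = 1 };
        item_t b = { .value = 2 };

        /* build root <-> a <-> b <-> root by hand for the demo */
        root.next   = &a.list;   a.list.pred = &root;
        a.list.next = &b.list;   b.list.pred = &a.list;
        b.list.next = &root;     root.pred   = &b.list;

        printf("first = %d / last = %d\n",
               LIST_FIRST( &root , item_t , list )->value,
               LIST_LAST ( &root , item_t , list )->value );
        return 0;
    }
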
  • trunk/kernel/mm/mapper.c

    r438 r440  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016)
     5 *           Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/mm/mapper.h

    r407 r440  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016)
     5 *           Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/mm/vmm.c

    r438 r440  
    198198                  bool_t      mapping )
    199199{
    200     assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__,
    201     "this function must be executed in reference cluster" );
    202 
    203200    vmm_t * vmm = &process->vmm;
    204201    gpt_t * gpt = &vmm->gpt;
    205202
    206     printk("\n***** VSL and GPT for process %x\n\n",
    207     process->pid );
     203    printk("\n***** VSL and GPT for process %x in cluster %x\n\n",
     204    process->pid , local_cxy );
    208205
    209206    // get lock protecting the vseg list
     
    10361033}  // end vmm_remove_vseg()
    10371034
    1038 //////////////////////////////////////////////
    1039 error_t vmm_map_kernel_vseg( vseg_t    * vseg,
    1040                              uint32_t    attr )
    1041 {
    1042     vpn_t       vpn;        // VPN of PTE to be set
    1043     vpn_t       vpn_min;    // VPN of first PTE to be set
    1044     vpn_t       vpn_max;    // VPN of last PTE to be set (excluded)
    1045         ppn_t       ppn;        // PPN of allocated physical page
    1046         uint32_t    order;      // ln( number of small pages for one single PTE )
    1047         page_t    * page;
    1048     error_t     error;
    1049 
    1050     // check vseg type : must be a kernel vseg
    1051     uint32_t type = vseg->type;
    1052     assert( ((type==VSEG_TYPE_KCODE) || (type==VSEG_TYPE_KDATA) || (type==VSEG_TYPE_KDEV)),
    1053             __FUNCTION__ , "not a kernel vseg\n" );
    1054 
    1055     // get pointer on page table
    1056     gpt_t * gpt = &process_zero.vmm.gpt;
    1057 
    1058     // define number of small pages per PTE
    1059         if( attr & GPT_SMALL ) order = 0;   // 1 small page
    1060         else                   order = 9;   // 512 small pages
    1061 
    1062     // loop on pages in vseg
    1063     vpn_min = vseg->vpn_base;
    1064     vpn_max = vpn_min + vseg->vpn_size;
    1065         for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    1066         {
    1067         // allocate a physical page from local PPM
    1068             kmem_req_t req;
    1069             req.type  = KMEM_PAGE;
    1070             req.size  = order;
    1071             req.flags = AF_KERNEL | AF_ZERO;
    1072             page      = (page_t *)kmem_alloc( &req );
    1073                 if( page == NULL )
    1074         {
    1075             printk("\n[ERROR] in %s : cannot allocate physical memory\n", __FUNCTION__ );
    1076             return ENOMEM;
    1077         }
    1078 
    1079         // set page table entry
    1080         ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
    1081         error = hal_gpt_set_pte( gpt,
    1082                                  vpn,
    1083                                  attr,
    1084                                  ppn );
    1085                 if( error )
    1086         {
    1087             printk("\n[ERROR] in %s : cannot register PPE\n", __FUNCTION__ );
    1088             return ENOMEM;
    1089         }
    1090         }
    1091 
    1092         return 0;
    1093 
    1094 }  // end vmm_map_kernel_vseg()
    1095 
    10961035/////////////////////////////////////////
    10971036void vmm_unmap_vseg( process_t * process,
     
    11931132
    11941133//////////////////////////////////////////////////////////////////////////////////////////
    1195 // This low-level static function is called by the vmm_get_vseg() and vmm_resize_vseg()
    1196 // functions.  It scan the list of registered vsegs to find the unique vseg containing
    1197 // a given virtual address.
     1134// This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(),
     1135// and vmm_resize_vseg() functions.  It scan the local VSL to find the unique vseg
     1136// containing a given virtual address.
    11981137//////////////////////////////////////////////////////////////////////////////////////////
    11991138// @ vmm     : pointer on the process VMM.
     
    13311270                       vseg_t   ** found_vseg )
    13321271{
    1333     vmm_t  * vmm = &process->vmm;
    1334 
    1335     // get vseg from vaddr
    1336     vseg_t * vseg = vseg_from_vaddr( vmm , vaddr );
     1272    xptr_t   vseg_xp;
     1273    error_t  error;
     1274    vseg_t * vseg;
     1275    vmm_t  * vmm;
     1276
     1277    // get pointer on local VMM
     1278    vmm = &process->vmm;
     1279
     1280    // try to get vseg from local VMM
     1281    vseg = vseg_from_vaddr( vmm , vaddr );
    13371282
    13381283    if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
     
    13481293
    13491294        // get extended pointer on reference vseg
    1350         xptr_t   vseg_xp;
    1351         error_t  error;
    1352 
    13531295        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
    13541296           
    1355         if( error )   return -1;       // vseg not found => illegal user vaddr
     1297        if( error )   return -1;                // vseg not found => illegal user vaddr
    13561298       
    13571299        // allocate a vseg in local cluster
    13581300        vseg = vseg_alloc();
    13591301
    1360         if( vseg == NULL ) return -1;
     1302        if( vseg == NULL ) return -1;           // cannot allocate a local vseg
    13611303
    13621304        // initialise local vseg from reference
     
    14961438
    14971439        // initialise missing page from .elf file mapper for DATA and CODE types
    1498         // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
     1440        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
    14991441        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
    15001442        {
     
    15211463#endif
    15221464
     1465
    15231466            // compute extended pointer on page base
    15241467            xptr_t base_xp  = ppm_page2base( page_xp );
     
    15351478__FUNCTION__, CURRENT_THREAD, vpn );
    15361479#endif
     1480
    15371481
    15381482                if( GET_CXY( page_xp ) == local_cxy )
     
    15531497__FUNCTION__, CURRENT_THREAD, vpn );
    15541498#endif
    1555 
    15561499                if( mapper_cxy == local_cxy )
    15571500                {
     
    16441587                     ppn_t     * ppn )
    16451588{
    1646     vseg_t  * vseg;       // vseg containing VPN
    1647     ppn_t     old_ppn;    // current PTE_PPN
    1648     uint32_t  old_attr;   // current PTE_ATTR
    1649     ppn_t     new_ppn;    // new PTE_PPN
    1650     uint32_t  new_attr;   // new PTE_ATTR
    1651     error_t   error;
    1652 
    1653     // this function must be called by a thread running in the reference cluster
    1654     assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ ,
    1655     "not called in the reference cluster\n" );
     1589    ppn_t      old_ppn;    // current PTE_PPN
     1590    uint32_t   old_attr;   // current PTE_ATTR
     1591    ppn_t      new_ppn;    // new PTE_PPN
     1592    uint32_t   new_attr;   // new PTE_ATTR
     1593    vmm_t    * vmm;
     1594    vseg_t   * vseg;     
     1595    error_t    error;
    16561596
    16571597#if DEBUG_VMM_GET_PTE
     
    16631603
    16641604    // get VMM pointer
    1665     vmm_t * vmm = &process->vmm;
    1666 
    1667     // get vseg pointer from reference VSL
    1668     error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
    1669 
    1670     if( error )
    1671     {
    1672         printk("\n[ERROR] in %s : out of segment / process = %x / vpn = %x\n",
    1673         __FUNCTION__ , process->pid , vpn );
    1674         return error;
    1675     }
    1676 
    1677 #if( DEBUG_VMM_GET_PTE & 1 )
    1678 cycle = (uint32_t)hal_get_cycles();
    1679 if( DEBUG_VMM_GET_PTE < cycle )
    1680 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
    1681 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
    1682 #endif
     1605    vmm = &process->vmm;
     1606
     1607    // get local vseg descriptor
     1608    error =  vmm_get_vseg( process,
     1609                           ((intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT),
     1610                           &vseg );
     1611
     1612    // vseg has been checked by the vmm_handle_page_fault() function
     1613    assert( (vseg != NULL) , __FUNCTION__,
     1614    "vseg undefined / vpn %x / thread %x / process %x / core[%x,%d] / cycle %d\n",
     1615    vpn, CURRENT_THREAD, process->pid, local_cxy, CURRENT_THREAD->core->lid,
     1616    (uint32_t)hal_get_cycles() );
    16831617
    16841618    if( cow )  //////////////// copy_on_write request //////////////////////
    1685                // get PTE from reference GPT
     1619               // get PTE from local GPT
    16861620               // allocate a new physical page if there is pending forks,
    16871621               // initialize it from old physical page content,
    16881622               // update PTE in all GPT copies,
    16891623    {
    1690         // access GPT to get current PTE attributes and PPN
     1624        // access local GPT to get current PTE attributes and PPN
    16911625        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    16921626
    1693         assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    1694         "PTE must be mapped for a copy-on-write exception\n" );
     1627        assert( (old_attr & GPT_MAPPED), __FUNCTION__,
     1628        "PTE unmapped for a COW exception / vpn %x / thread %x / process %x / cycle %d\n",
     1629        vpn, CURRENT_THREAD, process->pid, (uint32_t)hal_get_cycles() );
    16951630
    16961631#if( DEBUG_VMM_GET_PTE & 1 )
    1697 cycle = (uint32_t)hal_get_cycles();
    16981632if( DEBUG_VMM_GET_PTE < cycle )
    16991633printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
     
    17451679    }
    17461680    else        //////////// page_fault request ///////////////////////////
    1747                 // get PTE from reference GPT
     1681                // get PTE from local GPT
    17481682                // allocate a physical page if it is a true page fault,
     1683                // initialize it if type is FILE, CODE, or DATA,
    17491684                // register in reference GPT, but don't update GPT copies
    17501685    { 
    1751         // access GPT to get current PTE
     1686        // access local GPT to get current PTE
    17521687        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    17531688
     
    17561691
    17571692#if( DEBUG_VMM_GET_PTE & 1 )
    1758 cycle = (uint32_t)hal_get_cycles();
    17591693if( DEBUG_VMM_GET_PTE < cycle )
    17601694printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
    17611695__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
    17621696#endif
    1763 
    1764             // allocate new_ppn, depending on vseg type
     1697            // allocate new_ppn, and initialize the new page
    17651698            error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
    17661699            if( error )
     
    18011734cycle = (uint32_t)hal_get_cycles();
    18021735if( DEBUG_VMM_GET_PTE < cycle )
    1803 printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
     1736printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
    18041737__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
    18051738#endif
     
    18141747///////////////////////////////////////////////////
    18151748error_t vmm_handle_page_fault( process_t * process,
    1816                                vpn_t       vpn )
     1749                               vpn_t       vpn,
     1750                               bool_t      is_cow )
    18171751{
    18181752    uint32_t         attr;          // missing page attributes
    18191753    ppn_t            ppn;           // missing page PPN
     1754    vseg_t         * vseg;          // vseg containing vpn
     1755    uint32_t         type;          // vseg type
     1756    cxy_t            ref_cxy;       // reference cluster for missing vpn
     1757    process_t      * ref_ptr;       // reference process for missing vpn
    18201758    error_t          error;
    18211759
    1822 #if DEBUG_VMM_GET_PTE
     1760    thread_t       * this = CURRENT_THREAD;
     1761
     1762#if DEBUG_VMM_HANDLE_PAGE_FAULT
    18231763uint32_t cycle = (uint32_t)hal_get_cycles();
    1824 if( DEBUG_VMM_GET_PTE < cycle )
    1825 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    1826 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1827 #endif
     1764if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1765printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / core[%x,%d] / cycle %d\n",
     1766__FUNCTION__, this, vpn, process->pid, local_cxy, this->core->lid, cycle );
     1767#endif
     1768
     1769    // get local vseg (access reference VSL if required)
     1770    error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
     1771
     1772    if( error )
     1773    {
     1774        printk("\n[ERROR] in %s : vpn %x / process %x / thread %x / core[%x,%d] / cycle %d\n",
     1775        __FUNCTION__, vpn, process->pid, this->trdid, local_cxy, this->core->lid,
     1776        (uint32_t)hal_get_cycles() );
     1777        return error;
     1778    }
     1779
     1780    // get segment type
     1781    type = vseg->type;
    18281782
    18291783    // get reference process cluster and local pointer
    1830     cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1831     process_t * ref_ptr = GET_PTR( process->ref_xp );
    1832 
    1833     // get missing PTE attributes and PPN from reference cluster
      1784    // for private vsegs (STACK and CODE types),
     1785    // the reference is the local process descriptor.
     1786    if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
     1787    {
     1788        ref_cxy = local_cxy;
     1789        ref_ptr = process;
     1790    }
     1791    else
     1792    {
     1793        ref_cxy = GET_CXY( process->ref_xp );
     1794        ref_ptr = GET_PTR( process->ref_xp );
     1795    }
     1796
     1797    // get missing PTE attributes and PPN
    18341798    if( local_cxy != ref_cxy ) 
    18351799    {
     
    18371801                                ref_ptr,
    18381802                                vpn,
    1839                                 false,    // page_fault
     1803                                is_cow,
    18401804                                &attr,
    18411805                                &ppn,
     
    18551819        error = vmm_get_pte( process,
    18561820                             vpn,
    1857                              false,      // page-fault
     1821                             is_cow,
    18581822                             &attr,
    18591823                             &ppn );
    18601824    }
    18611825
    1862 #if DEBUG_VMM_GET_PTE
     1826#if DEBUG_VMM_HANDLE_PAGE_FAULT
    18631827cycle = (uint32_t)hal_get_cycles();
    1864 if( DEBUG_VMM_GET_PTE < cycle )
     1828if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    18651829printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    1866 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
     1830__FUNCTION__, this->trdid, vpn, process->pid, cycle );
    18671831#endif
    18681832
     
    18711835}  // end vmm_handle_page_fault()
    18721836
    1873 ////////////////////////////////////////////
    1874 error_t vmm_handle_cow( process_t * process,
    1875                         vpn_t       vpn )
    1876 {
    1877     uint32_t         attr;          // page attributes
    1878     ppn_t            ppn;           // page PPN
    1879     error_t          error;
    1880 
    1881 #if DEBUG_VMM_GET_PTE
    1882 uint32_t cycle = (uint32_t)hal_get_cycles();
    1883 if( DEBUG_VMM_GET_PTE < cycle )
    1884 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    1885 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1886 #endif
    1887    
    1888     // get reference process cluster and local pointer
    1889     cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1890     process_t * ref_ptr = GET_PTR( process->ref_xp );
    1891 
    1892     // get new PTE attributes and PPN from reference cluster
    1893     if( local_cxy != ref_cxy )
    1894     {
    1895         rpc_vmm_get_pte_client( ref_cxy,
    1896                                 ref_ptr,
    1897                                 vpn,
    1898                                 true,     // copy-on-write
    1899                                 &attr,
    1900                                 &ppn,
    1901                                 &error );
    1902 
    1903         // get local VMM pointer
    1904         vmm_t * vmm = &process->vmm;
    1905 
    1906         // update local GPT
    1907         error |= hal_gpt_set_pte( &vmm->gpt,
    1908                                   vpn,
    1909                                   attr,
    1910                                   ppn );
    1911     }
    1912     else   // local cluster is the reference cluster
    1913     {
    1914         error = vmm_get_pte( process,
    1915                              vpn,
    1916                              true,      // copy-on-write
    1917                              &attr,
    1918                              &ppn );
    1919     }
    1920 
    1921 #if DEBUG_VMM_GET_PTE
    1922 cycle = (uint32_t)hal_get_cycles();
    1923 if( DEBUG_VMM_GET_PTE < cycle )
    1924 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    1925 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1926 #endif
    1927 
    1928     return error;
    1929 
    1930 }  // end vmm_handle_cow()
    1931 
    1932 ///////////////////////////////////////////
    1933 error_t vmm_v2p_translate( bool_t    ident,
    1934                            void    * ptr,
    1935                            paddr_t * paddr )
    1936 {
    1937     process_t * process = CURRENT_THREAD->process;
    1938 
    1939     if( ident )  // identity mapping
    1940     {
    1941         *paddr = (paddr_t)PADDR( local_cxy , (lpa_t)ptr );
    1942         return 0;
    1943     }
    1944 
     1837/* deprecated April 2018  [AG]
     1838
     1839error_t vmm_v2p_translate( process_t * process,
     1840                           void      * ptr,
     1841                           paddr_t   * paddr )
     1842{
    19451843    // access page table
    19461844    error_t  error;
     
    19531851    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
    19541852
    1955     if( local_cxy == GET_CXY( process->ref_xp) ) // calling process is reference process
     1853    if( local_cxy == GET_CXY( process->ref_xp) ) // local process is reference process
    19561854    {
    19571855        error = vmm_get_pte( process, vpn , false , &attr , &ppn );
     
    19711869}  // end vmm_v2p_translate()
    19721870
    1973 
     1871*/
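
The reworked vmm_handle_page_fault() shown in the hunks above boils down to a three-step decision: find the vseg, pick the cluster owning the relevant GPT, then get the PTE locally or through an RPC. A condensed sketch of that path (kernel context, assuming the internal headers that declare these helpers; the wrapper name handle_vpn_miss is hypothetical, the helper signatures are the ones used in the hunks):

    error_t handle_vpn_miss( process_t * process,
                             vpn_t       vpn,
                             bool_t      is_cow )
    {
        vseg_t    * vseg;
        uint32_t    attr;
        ppn_t       ppn;
        cxy_t       ref_cxy;
        process_t * ref_ptr;
        error_t     error;

        // get the local vseg (fetched from the reference VSL if needed)
        error = vmm_get_vseg( process , vpn << CONFIG_PPM_PAGE_SHIFT , &vseg );
        if( error ) return error;                 // illegal user address

        // private vsegs (STACK, CODE) are served by the local cluster,
        // public vsegs by the reference cluster
        if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
        {
            ref_cxy = local_cxy;
            ref_ptr = process;
        }
        else
        {
            ref_cxy = GET_CXY( process->ref_xp );
            ref_ptr = GET_PTR( process->ref_xp );
        }

        // get (or build) the missing PTE, locally or through an RPC
        if( ref_cxy == local_cxy )
        {
            error = vmm_get_pte( process , vpn , is_cow , &attr , &ppn );
        }
        else
        {
            rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn ,
                                    is_cow , &attr , &ppn , &error );
        }
        return error;
    }
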
  • trunk/kernel/mm/vmm.h

    r437 r440  
    293293
    294294/*********************************************************************************************
    295  * This function allocates physical memory from the local cluster to map all PTEs
    296  * of a "kernel" vseg (type KCODE , KDATA, or KDEV) in the page table of process_zero.
    297  * WARNING : It should not be used for "user" vsegs, that must be mapped using the
    298  * "on-demand-paging" policy.
    299  *********************************************************************************************
    300  * @ vseg     : pointer on the vseg to be mapped.
    301  * @ attr     : GPT attributes to be set for all vseg pages.
    302  * @ returns 0 if success / returns ENOMEM if no memory
    303  ********************************************************************************************/
    304 error_t vmm_map_kernel_vseg( vseg_t           * vseg,
    305                              uint32_t           attr );
    306 
    307 /*********************************************************************************************
    308295 * This function removes a given region (defined by a base address and a size) from
    309296 * the VMM of a given process descriptor. This can modify the number of vsegs:
     
    335322 * @ process   : [in] pointer on process descriptor
    336323 * @ vaddr     : [in] virtual address
    337  * @ vseg      : [out] pointer on found vseg
    338  * @ returns 0 if success / returns -1 if user error.
     324 * @ vseg      : [out] local pointer on local vseg
     325 * @ returns 0 if success / returns -1 if user error (out of segment).
    339326 *********************************************************************************************/
    340327error_t vmm_get_vseg( struct process_s  * process,
     
    343330
    344331/*********************************************************************************************
    345  * This function is called by the generic exception handler when a page-fault event
    346  * has been detected for a given process in a given cluster.
    347  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
    348  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
    349  *   to the reference cluster to get the missing PTE attributes and PPN,
    350  *   and update the local page table.
    351  *********************************************************************************************
    352  * @ process   : pointer on process descriptor.
    353  * @ vpn       : VPN of the missing PTE.
    354  * @ returns 0 if success / returns ENOMEM if no memory.
      332 * This function is called by the generic exception handler in case of a page-fault
      333 * or a copy-on-write event locally detected for a given <vpn> in a given <process>,
     334 * as defined by the <is_cow> argument.
     335 * 1) For a Page-Fault:
     336 * - If the local cluster is the reference, or for the STACK and CODE segment types,
      337 *   it calls directly the vmm_get_pte() function to access the local VMM.
      338 * - Otherwise, it sends an RPC_VMM_GET_PTE to the reference cluster to get the missing
     339 *   PTE attributes and PPN.
      340 * This function checks that the missing VPN belongs to a registered vseg, allocates
     341 * a new physical page if required, and updates the local page table.
     342 * 2) For a Copy-On-Write:
      343 * - If no pending fork, it resets the COW flag and sets the WRITE flag in the reference
     344 *   GPT entry, and in all the GPT copies.
     345 * - If there is a pending fork, it allocates a new physical page from the cluster defined
     346 *   by the vseg type, copies the old physical page content to the new physical page,
     347 *   and decrements the pending_fork counter in old physical page descriptor.
     348 *********************************************************************************************
     349 * @ process   : pointer on local process descriptor copy.
     350 * @ vpn       : VPN of the missing or faulting PTE.
     351 * @ is_cow    : Copy-On-Write event if true / Page-fault if false.
     352 * @ returns 0 if success / returns ENOMEM if no memory or illegal VPN.
    355353 ********************************************************************************************/
    356354error_t vmm_handle_page_fault( struct process_s * process,
    357                                vpn_t              vpn );
    358 
    359 /*********************************************************************************************
    360  * This function is called by the generic exception handler when a copy-on-write event
    361  * has been detected for a given process in a given cluster.
    362  * It takes the lock protecting the physical page, and test the pending forks counter.
    363  * If no pending fork:
    364  * - it reset the COW flag and set the WRITE flag in the reference GPT entry, and in all
    365  *   the GPT copies
    366 
    367  * If there is a pending forkon the
    368  * - It get the involved vseg pointer.
    369  * - It allocates a new physical page from the cluster defined by the vseg type.
    370  * - It copies the old physical page content to the new physical page.
    371  * - It decrements the pending_fork counter in old physical page descriptor.
    372 
    373  *********************************************************************************************
    374  * @ process   : pointer on process descriptor.
    375  * @ vpn       : VPN of the missing PTE.
    376  * @ returns 0 if success / returns ENOMEM if no memory.
    377  ********************************************************************************************/
    378 error_t vmm_handle_cow( struct process_s * process,
    379                         vpn_t              vpn );
    380 
    381 /*********************************************************************************************
    382  * This function handle both the "page-fault" and "copy-on_write" events for a given <vpn>
    383  * in a given <process>.  The <cow> argument defines the type of event to be handled.
    384  * This function must be called by a thread running in reference cluster, and the vseg
    385  * containing the searched VPN must be registered in the reference VMM.
     355                               vpn_t              vpn,
     356                               bool_t             is_cow );
     357
     358/*********************************************************************************************
      359 * This function is called by the vmm_handle_page_fault() function to handle both the "page-fault"
      360 * and the "copy-on-write" events for a given <vpn> in a given <process>, as defined
     361 * by the <is_cow> argument.
     362 * The vseg containing the searched VPN must be registered in the reference VMM.
     386363 * - for a page-fault, it allocates the missing physical page from the target cluster
    387364 *   defined by the vseg type, initializes it, and updates the reference GPT, but not
     
    390367 *   initialise it from the old physical page, and updates the reference GPT and all
    391368 *   the GPT copies, for coherence.
    392  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
    393  * the target cluster is not the reference cluster.
     369 * It calls the RPC_PMEM_GET_PAGES to get the new physical page when the target cluster
      370 * is not the local cluster.
    394371 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
    395372 *********************************************************************************************
    396373 * @ process   : [in] pointer on process descriptor.
    397374 * @ vpn       : [in] VPN defining the missing PTE.
    398  * @ cow       : [in] "copy_on_write" if true / "page_fault" if false.
     375 * @ is_cow    : [in] "copy_on_write" if true / "page_fault" if false.
    399376 * @ attr      : [out] PTE attributes.
    400377 * @ ppn       : [out] PTE ppn.
     
    403380error_t vmm_get_pte( struct process_s * process,
    404381                     vpn_t              vpn,
    405                      bool_t             cow,
     382                     bool_t             is_cow,
    406383                     uint32_t         * attr,
    407384                     ppn_t            * ppn );
     
    428405                         ppn_t  * ppn );
    429406
    430 /*********************************************************************************************
    431  * This function makes the virtual to physical address translation, using the calling
    432  * process page table. It uses identity mapping if required by the <ident> argument.
    433  * This address translation is required to configure the peripherals having a DMA
    434  * capability, or to implement the software L2/L3 cache cohérence, using the MMC device
    435  * synchronisation primitives.
    436  * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENTITY_MAP parameter.
    437  *********************************************************************************************
    438  * @ ident     : [in] uses identity mapping if true.
    439  * @ ptr       : [in] virtual address.
    440  * @ paddr     : [out] pointer on buffer for physical address.
    441  * @ returns 0 if success / returns ENOMEM if error.
    442  ********************************************************************************************/
    443 error_t vmm_v2p_translate( bool_t    ident,
    444                            void    * ptr,
    445                            paddr_t * paddr );
    446 
    447 
    448407
    449408#endif /* _VMM_H_ */
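
With the unified prototype above, the generic exception handler only has to turn the faulty address into a VPN and pass the event type. A hedged sketch of such a caller (the handler name and how the MMU reports the faulty address and the event type are assumptions; only vmm_handle_page_fault() and its arguments come from this header):

    error_t hal_vmm_exception( process_t * process,
                               intptr_t    bad_vaddr,
                               bool_t      is_cow_event )
    {
        // virtual page number of the faulty access
        vpn_t vpn = bad_vaddr >> CONFIG_PPM_PAGE_SHIFT;

        // same entry point for both events : <is_cow_event> selects the policy
        return vmm_handle_page_fault( process , vpn , is_cow_event );
    }
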
  • trunk/kernel/mm/vseg.c

    r429 r440  
    143143                      VSEG_CACHE   ;
    144144    }
    145     else if( type == VSEG_TYPE_KCODE )
    146     {
    147         vseg->flags = VSEG_EXEC    |
    148                       VSEG_CACHE   |
    149                       VSEG_PRIVATE ;
    150     }
    151     else if( type == VSEG_TYPE_KDATA )
    152     {
    153         vseg->flags = VSEG_WRITE   |
    154                       VSEG_CACHE   |
    155                       VSEG_PRIVATE ;
    156     }
    157     else if( type == VSEG_TYPE_KDEV )
    158     {
    159         vseg->flags = VSEG_WRITE   ;
    160     }
    161145    else
    162146    {
  • trunk/kernel/mm/vseg.h

    r409 r440  
    4747    VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
    4848    VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
    49 
    50     VSEG_TYPE_KDATA  = 10,
    51     VSEG_TYPE_KCODE  = 11,
    52     VSEG_TYPE_KDEV   = 12,
    5349}
    5450vseg_type_t;
  • trunk/kernel/syscalls/sys_barrier.c

    r23 r440  
    22 * sys_barrier.c - Access a POSIX barrier.
    33 *
    4  * authors       Alain Greiner (2016,2017)
     4 * authors       Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737{
    3838        error_t      error;
    39     paddr_t      paddr;
     39    vseg_t     * vseg;
    4040 
    41     thread_t   * this = CURRENT_THREAD;
     41    thread_t   * this    = CURRENT_THREAD;
     42    process_t  * process = this->process;
    4243
    4344    // check vaddr in user vspace
    44         error = vmm_v2p_translate( false , vaddr , &paddr );
     45        error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );
     46
    4547        if( error )
    4648    {
    47         printk("\n[ERROR] in %s : illegal barrier virtual address = %x\n",
    48                __FUNCTION__ , (intptr_t)vaddr );
     49
     50#if DEBUG_SYSCALLS_ERROR
     51printk("\n[ERROR] in %s : unmapped barrier %x / thread %x / process %x\n",
     52__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     53vmm_display( process , false );
     54#endif
    4955        this->errno = error;
    5056        return -1;
     
    6167                    if( error )
    6268            {
    63                 printk("\n[ERROR] in %s : cannot create barrier = %x\n",
    64                        __FUNCTION__ , (intptr_t)vaddr );
     69
     70#if DEBUG_SYSCALLS_ERROR
     71printk("\n[ERROR] in %s : cannot create barrier %x / thread %x / process %x\n",
     72__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     73#endif
    6574                this->errno = error;
    6675                return -1;
     
    7584            if( barrier_xp == XPTR_NULL )     // user error
    7685            {
    77                 printk("\n[ERROR] in %s : barrier %x not registered\n",
    78                        __FUNCTION__ , (intptr_t)vaddr );
     86
     87#if DEBUG_SYSCALLS_ERROR
     88printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",
     89__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     90#endif
    7991                this->errno = EINVAL;
    8092                return -1;
     
    93105            if( barrier_xp == XPTR_NULL )     // user error
    94106            {
    95                 printk("\n[ERROR] in %s : barrier %x not registered\n",
    96                        __FUNCTION__ , (intptr_t)vaddr );
     107
     108#if DEBUG_SYSCALLS_ERROR
     109printk("\n[ERROR] in %s : barrier %x not registered / thread %x / process %x\n",
     110__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     111#endif
    97112                this->errno = EINVAL;
    98113                return -1;
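
The sys_barrier.c hunk above introduces the pattern that the remaining syscall hunks in this changeset repeat: validate each user pointer with vmm_get_vseg() instead of the removed vmm_v2p_translate(), report the failure under DEBUG_SYSCALLS_ERROR, set errno, and return -1. A minimal sketch of that pattern (kernel context; sys_foo and its argument are hypothetical, the helpers and the error convention are the ones used above):

    int sys_foo( void * uptr )
    {
        vseg_t    * vseg;
        thread_t  * this    = CURRENT_THREAD;
        process_t * process = this->process;

        // reject any pointer that is not covered by a registered vseg
        if( vmm_get_vseg( process , (intptr_t)uptr , &vseg ) )
        {

    #if DEBUG_SYSCALLS_ERROR
    printk("\n[ERROR] in %s : unmapped buffer %x / thread %x / process %x\n",
    __FUNCTION__ , (intptr_t)uptr , this->trdid , process->pid );
    #endif
            this->errno = EINVAL;
            return -1;
        }

        // the pointer is mapped : it can now be accessed with the
        // hal_..._from_uspace() copy helpers
        return 0;
    }
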
  • trunk/kernel/syscalls/sys_condvar.c

    r23 r440  
    22 * sys_condvar.c - Access a POSIX condvar.
    33 *
    4  * Author    Alain Greiner  (2016,2017)
     4 * Author    Alain Greiner  (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3030#include <syscalls.h>
    3131#include <remote_condvar.h>
     32#include <remote_mutex.h>
    3233
    3334////////////////////////////////////////
     
    3637                 void         * mutex )
    3738{
    38         error_t    error;
    39     paddr_t    paddr;
     39        error_t     error;
     40    vseg_t    * vseg;
    4041 
    41     thread_t * this = CURRENT_THREAD;
     42    thread_t  * this    = CURRENT_THREAD;
     43    process_t * process = this->process;
    4244
    4345    // check condvar in user vspace
    44         error = vmm_v2p_translate( false , condvar , &paddr );
     46        error = vmm_get_vseg( process , (intptr_t)condvar , &vseg );
     47
    4548        if( error )
    4649    {
    47         printk("\n[ERROR] in %s : illegal condvar virtual address = %x\n",
    48                __FUNCTION__ , (intptr_t)condvar );
     50
     51#if DEBUG_SYSCALLS_ERROR
     52printk("\n[ERROR] in %s : unmapped condvar %x / thread %x / process %x\n",
     53__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     54vmm_display( process , false );
     55#endif
    4956        this->errno = error;
    5057        return -1;
     
    6168                    if( error )
    6269            {
    63                 printk("\n[ERROR] in %s : cannot create condvar = %x\n",
    64                        __FUNCTION__ , (intptr_t)condvar );
     70
     71#if DEBUG_SYSCALLS_ERROR
     72printk("\n[ERROR] in %s : cannot create condvar %x / thread %x / process %x\n",
     73__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     74#endif
    6575                this->errno = error;
    6676                return -1;
     
    7282        {
    7383            // check mutex in user vspace
    74                 error = vmm_v2p_translate( false , mutex , &paddr );
     84                error = vmm_get_vseg( process , (intptr_t)mutex , &vseg );
    7585
    7686                if( error )
    7787            {
    78                 printk("\n[ERROR] in %s : illegal condvar virtual address = %x\n",
    79                        __FUNCTION__ , (intptr_t)condvar );
     88
     89#if DEBUG_SYSCALLS_ERROR
     90printk("\n[ERROR] in %s : unmapped mutex %x / thread %x / process %x\n",
     91__FUNCTION__ , (intptr_t)mutex , this->trdid , process->pid );
     92#endif
    8093                this->errno = error;
    8194                return -1;
     
    8699            if( condvar_xp == XPTR_NULL )     // user error
    87100            {
    88                 printk("\n[ERROR] in %s : condvar %x not registered\n",
    89                        __FUNCTION__ , (intptr_t)condvar );
     101
     102#if DEBUG_SYSCALLS_ERROR
     103printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",
     104__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     105#endif
    90106                this->errno = EINVAL;
    91107                return -1;
    92108            }
    93109   
    94             xptr_t mutex_xp = remote_condvar_from_ident( (intptr_t)condvar );
     110            xptr_t mutex_xp = remote_mutex_from_ident( (intptr_t)mutex );
     111
    95112            if( mutex_xp == XPTR_NULL )     // user error
    96113            {
    97                 printk("\n[ERROR] in %s : mutex %x not registered\n",
    98                        __FUNCTION__ , (intptr_t)condvar );
     114
     115#if DEBUG_SYSCALLS_ERROR
     116printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",
     117__FUNCTION__ , (intptr_t)mutex , this->trdid , process->pid );
     118#endif
    99119                this->errno = EINVAL;
    100120                return -1;
     
    112132            if( condvar_xp == XPTR_NULL )     // user error
    113133            {
    114                 printk("\n[ERROR] in %s : condvar %x not registered\n",
    115                        __FUNCTION__ , (intptr_t)condvar );
     134
     135#if DEBUG_SYSCALLS_ERROR
     136printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",
     137__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     138#endif
    116139                this->errno = EINVAL;
    117140                return -1;
     
    129152            if( condvar_xp == XPTR_NULL )     // user error
    130153            {
    131                 printk("\n[ERROR] in %s : condvar %x not registered\n",
    132                        __FUNCTION__ , (intptr_t)condvar );
     154
     155#if DEBUG_SYSCALLS_ERROR
     156printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",
     157__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     158#endif
    133159                this->errno = EINVAL;
    134160                return -1;
     
    146172            if( condvar_xp == XPTR_NULL )     // user error
    147173            {
    148                 printk("\n[ERROR] in %s : condvar %x not registered\n",
    149                        __FUNCTION__ , (intptr_t)condvar );
     174
     175#if DEBUG_SYSCALLS_ERROR
     176printk("\n[ERROR] in %s : condvar %x not registered / thread %x / process %x\n",
     177__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
     178#endif
    150179                this->errno = EINVAL;
    151180                return -1;
  • trunk/kernel/syscalls/sys_display.c

    r438 r440  
    22 * sys_display.c - display the current state of a kernel structure on TXT0
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3838{
    3939
     40    error_t     error;
     41    vseg_t    * vseg;
     42
     43    thread_t  * this    = CURRENT_THREAD;
     44    process_t * process = this->process;
     45
    4046#if DEBUG_SYS_DISPLAY
    4147uint64_t    tm_start;
    4248uint64_t    tm_end;
    43 thread_t  * this;
    44 this     = CURRENT_THREAD;
    4549tm_start = hal_get_cycles();
    4650if( DEBUG_SYS_DISPLAY < tm_start )
     
    4953#endif
    5054
     55    ////////////////////////////
    5156    if( type == DISPLAY_STRING )
    5257    {
    53         paddr_t   paddr;
    5458        char      kbuf[256];
    5559        uint32_t  length;
    5660
    5761        char    * string = (char *)arg0;
    58  
     62
    5963        // check string in user space
    60         if( vmm_v2p_translate( false , string , &paddr ) )
    61         {
    62             printk("\n[ERROR] in %s : string buffer %x unmapped\n",
    63             __FUNCTION__ , string );
     64        error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
     65
     66        if( error )
     67        {
     68
     69#if DEBUG_SYSCALLS_ERROR
     70printk("\n[ERROR] in %s : string buffer %x unmapped / thread %x / process %x\n",
     71__FUNCTION__ , (intptr_t)arg0 , this->trdid , process->pid );
     72#endif
     73            this->errno = EINVAL;
    6474            return -1;
    6575        }
     
     6777        // check string length
    6878        length = hal_strlen_from_uspace( string );
     79
    6980        if( length >= 256 )
    7081        {
    71             printk("\n[ERROR] in %s : string length %d too large\n",
    72             __FUNCTION__ , length );
    73             return -1;
    74         }
    75 
    76         // copy string in kernel space
     82
     83#if DEBUG_SYSCALLS_ERROR
     84printk("\n[ERROR] in %s : string length %d too large / thread %x / process %x\n",
     85__FUNCTION__ , length , this->trdid , process->pid );
     86#endif
     87            this->errno = EINVAL;
     88            return -1;
     89        }
     90
     91        // copy string to kernel space
    7792        hal_strcpy_from_uspace( kbuf , string , 256 );
    7893
    7994        // print message on TXT0 kernel terminal
    80         printk("\n[USER] %s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
    81     }
     95        printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
     96    }
     97    //////////////////////////////
    8298    else if( type == DISPLAY_VMM )
    8399    {
     
    89105            if( process_xp == XPTR_NULL )
    90106        {
    91             printk("\n[ERROR] in %s : undefined PID %x\n",
    92             __FUNCTION__ , pid );
     107
     108#if DEBUG_SYSCALLS_ERROR
     109printk("\n[ERROR] in %s : undefined pid argument %d / thread %x / process %x\n",
     110__FUNCTION__ , pid , this->trdid , process->pid );
     111#endif
     112            this->errno = EINVAL;
    93113            return -1;
    94114        }
     
    108128        }
    109129    }
     130    ////////////////////////////////
    110131    else if( type == DISPLAY_SCHED )
    111132    {
     
    113134        lid_t lid = (lid_t)arg1;
    114135
    115         // check cluster argument
     136        // check cxy argument
    116137            if( cluster_is_undefined( cxy ) )
    117138        {
    118             printk("\n[ERROR] in %s : undefined cluster identifier %x\n",
    119             __FUNCTION__ , cxy );
    120             return -1;
    121         }
    122 
    123         // check core argument
     139
     140#if DEBUG_SYSCALLS_ERROR
     141printk("\n[ERROR] in %s : illegal cxy argument %x / thread %x / process %x\n",
     142__FUNCTION__ , cxy , this->trdid , process->pid );
     143#endif
     144            this->errno = EINVAL;
     145            return -1;
     146        }
     147
     148        // check lid argument
    124149        if( lid >= LOCAL_CLUSTER->cores_nr )
    125150        {
    126             printk("\n[ERROR] in %s : undefined local index %d\n",
    127             __FUNCTION__ , lid );
     151
     152#if DEBUG_SYSCALLS_ERROR
     153printk("\n[ERROR] in %s : illegal lid argument %x / thread %x / process %x\n",
     154__FUNCTION__ , lid , this->trdid , process->pid );
     155#endif
     156            this->errno = EINVAL;
    128157            return -1;
    129158        }
     
    138167        }
    139168    }
     169    ////////////////////////////////////////////
    140170    else if( type == DISPLAY_CLUSTER_PROCESSES )
    141171    {
    142172        cxy_t cxy = (cxy_t)arg0;
    143173
    144         // check cluster argument
     174        // check cxy argument
    145175            if( cluster_is_undefined( cxy ) )
    146176        {
    147             printk("\n[ERROR] in %s : undefined cluster identifier %x\n",
    148             __FUNCTION__ , cxy );
     177
     178#if DEBUG_SYSCALLS_ERROR
     179printk("\n[ERROR] in %s : illegal cxy argument %x / thread %x / process %x\n",
     180__FUNCTION__ , cxy , this->trdid , process->pid );
     181#endif
     182            this->errno = EINVAL;
    149183            return -1;
    150184        }
     
    152186        cluster_processes_display( cxy );
    153187    }
     188    ////////////////////////////////////////
    154189    else if( type == DISPLAY_TXT_PROCESSES )
    155190    {
     
    159194            if( txt_id >= LOCAL_CLUSTER->nb_txt_channels )
    160195        {
    161             printk("\n[ERROR] in %s : undefined TXT channel %x\n",
    162             __FUNCTION__ , txt_id );
     196
     197#if DEBUG_SYSCALLS_ERROR
     198printk("\n[ERROR] in %s : illegal txt_id argument %d / thread %x / process %x\n",
     199__FUNCTION__ , txt_id , this->trdid , process->pid );
     200#endif
     201            this->errno = EINVAL;
    163202            return -1;
    164203        }
     
    166205        process_txt_display( txt_id );
    167206    }
     207    //////////////////////////////
    168208    else if( type == DISPLAY_VFS )
    169209    {
    170         // call kernel function
    171         process_t * process = CURRENT_THREAD->process;
    172210        vfs_display( process->vfs_root_xp );
    173211    }
     212    ////////////////////////////////
    174213    else if( type == DISPLAY_CHDEV )
    175214    {
    176215        chdev_dir_display();
    177216    }
     217    ////
    178218    else
    179219    {
    180         printk("\n[ERROR] in %s : undefined display type %x\n",
    181         __FUNCTION__ , type );
     220
     221#if DEBUG_SYSCALLS_ERROR
     222printk("\n[ERROR] in %s : undefined display type %x / thread %x / process %x\n",
     223        __FUNCTION__ , type , this->trdid , process->pid );
     224#endif
     225        this->errno = EINVAL;
    182226        return -1;
    183227    }
  • trunk/kernel/syscalls/sys_exit.c

    r438 r440  
    22 * sys_exit.c - Kernel function implementing the "exit" system call.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    4141    process_t * process = this->process;
    4242    pid_t       pid     = process->pid;
    43     trdid_t     trdid   = this->trdid;
    4443
    4544#if DEBUG_SYS_EXIT
     
    5251#endif
    5352
    54     // get owner cluster
    55     cxy_t  owner_cxy = CXY_FROM_PID( pid );
      53    // get owner process descriptor pointers and cluster
     54    xptr_t      owner_xp  = cluster_get_owner_process_from_pid( pid );
     55    cxy_t       owner_cxy = GET_CXY( owner_xp );
     56    process_t * owner_ptr = GET_PTR( owner_xp );
    5657
    57     // exit must be called by the main thread
    58     if( (owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid ) != 0) )
    59     {
    60 
    61 #if DEBUG_SYSCALLS_ERROR
    62 printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n",
    63 __FUNCTION__, trdid, owner_cxy );
    64 #endif
    65          this->errno = EINVAL;
    66          return -1;
    67     }
     58    // get pointers on the process main thread
     59    thread_t * main    = hal_remote_lpt( XPTR( owner_cxy , &owner_ptr->th_tbl[0] ) );
    6860
    6961    // enable IRQs
    7062    hal_enable_irq( &save_sr );
    7163
    72     // register exit_status in owner process descriptor
    73     process->term_state = status;
     64    // mark for delete all process threads in all clusters
     65    // (but the main thread and this calling thread)
     66    process_sigaction( pid , DELETE_ALL_THREADS );
     67
     68    // disable IRQs
     69    hal_restore_irq( save_sr );
    7470
    7571#if( DEBUG_SYS_EXIT & 1)
    76 printk("\n[DBG] %s : set exit status in process term_state\n", __FUNCTION__);
     72if( tm_start > DEBUG_SYS_EXIT )
     73printk("\n[DBG] %s : thread %x deleted threads / process %x\n",
     74__FUNCTION__ , this, pid );
    7775#endif
    7876
    79     // remove process from TXT list
    80     process_txt_detach( XPTR( local_cxy , process ) );
     77    // mark for delete this calling thread when it is not the main
     78    if( (owner_cxy != local_cxy) || (main != this) )
     79    {
    8180
    8281#if( DEBUG_SYS_EXIT & 1)
    83 printk("\n[DBG] %s : removed from TXT list\n", __FUNCTION__);
     82if( tm_start > DEBUG_SYS_EXIT )
     83printk("\n[DBG] %s : calling thread %x deleted itself / process %x\n",
     84__FUNCTION__ , this, pid );
     85#endif
     86        thread_delete( XPTR( local_cxy , this ) , pid , true );
     87    }
     88         
     89    // remove process from TXT list
     90    process_txt_detach( owner_xp );
     91
     92#if( DEBUG_SYS_EXIT & 1)
     93if( tm_start > DEBUG_SYS_EXIT )
     94printk("\n[DBG] %s : thread %x removed process %x from TXT list\n",
     95__FUNCTION__ , this, pid );
    8496#endif
    8597
    86     // mark for delete all process threads in all clusters (but the main)
    87     process_sigaction( pid , DELETE_ALL_THREADS );
     98    // block the main thread
     99    thread_block( XPTR( owner_cxy , main ) , THREAD_BLOCKED_GLOBAL );
    88100
    89101#if( DEBUG_SYS_EXIT & 1)
    90 printk("\n[DBG] %s : deleted all other threads than main\n", __FUNCTION__);
    91 #endif
    92 
    93     // restore IRQs
    94     hal_restore_irq( save_sr );
    95 
    96     // block the main thread itself
    97     thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
    98 
    99 #if( DEBUG_SYS_EXIT & 1)
    100 printk("\n[DBG] %s : blocked the main thread\n", __FUNCTION__);
     102if( tm_start > DEBUG_SYS_EXIT )
     103printk("\n[DBG] %s : thread %x blocked main thread for process %x\n",
     104__FUNCTION__, this , pid );
    101105#endif
    102106
    103107    // atomically update owner process descriptor term_state to ask
    104     // the parent process sys_wait() function to delete this main thread
    105     hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,
    106                           PROCESS_TERM_EXIT );
     108    // the parent process sys_wait() function to delete the main thread
     109    hal_remote_atomic_or( XPTR( owner_cxy , &process->term_state ) ,
     110                          PROCESS_TERM_EXIT | (status & 0xFF) );
    107111
    108112#if( DEBUG_SYS_EXIT & 1)
    109 printk("\n[DBG] %s : set EXIT flag in process term_state\n", __FUNCTION__);
     113if( tm_start > DEBUG_SYS_EXIT )
     114printk("\n[DBG] %s : thread %x set exit status in process %x term_state\n",
     115__FUNCTION__ , this, pid );
    110116#endif
    111117
     
    119125#endif
    120126
    121     // main thread deschedule
      127    // this thread deschedules
    122128    sched_yield( "process exit" );
    123129
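
The sys_exit.c rework above makes process termination callable from any thread of the process. Stripped of the traces and IRQ masking, the sequence is the one sketched below (kernel context; exit_flow is a hypothetical condensation, every helper and its arguments are taken from the hunk, with the term_state update written here against the owner descriptor):

    static void exit_flow( process_t * process , uint32_t status )
    {
        pid_t       pid       = process->pid;
        thread_t  * this      = CURRENT_THREAD;

        // owner process descriptor cluster and local pointer
        xptr_t      owner_xp  = cluster_get_owner_process_from_pid( pid );
        cxy_t       owner_cxy = GET_CXY( owner_xp );
        process_t * owner_ptr = GET_PTR( owner_xp );

        // the main thread lives in the owner cluster, slot 0 of th_tbl[]
        thread_t  * main      = hal_remote_lpt( XPTR( owner_cxy , &owner_ptr->th_tbl[0] ) );

        // mark for delete all threads but the main and the caller
        process_sigaction( pid , DELETE_ALL_THREADS );

        // the caller deletes itself when it is not the main thread
        if( (owner_cxy != local_cxy) || (main != this) )
            thread_delete( XPTR( local_cxy , this ) , pid , true );

        // detach the process from its TXT terminal
        process_txt_detach( owner_xp );

        // block the main thread : the parent's sys_wait() will reap it
        thread_block( XPTR( owner_cxy , main ) , THREAD_BLOCKED_GLOBAL );

        // publish the exit status for the parent process
        hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
                              PROCESS_TERM_EXIT | (status & 0xFF) );

        // never returns to user code
        sched_yield( "process exit" );
    }
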
  • trunk/kernel/syscalls/sys_fork.c

    r438 r440  
    7777    ref_process_xp  = parent_process_ptr->ref_xp;
    7878    ref_process_cxy = GET_CXY( ref_process_xp );
    79     ref_process_ptr = (process_t *)GET_PTR( ref_process_xp );
     79    ref_process_ptr = GET_PTR( ref_process_xp );
    8080
    8181    // check parent process children number from reference
     
    104104        }
    105105
    106 #if( DEBUG_SYS_FORK & 1)
    107 
    108 // dqdt_display();
    109 
    110 if( local_cxy == 0 )
    111 {
    112     sched_display( 0 );
    113     rpc_sched_display_client( 1 , 0 );
    114 }
    115 else
    116 {
    117     sched_display( 0 );
    118     rpc_sched_display_client( 0 , 0 );
    119 }
    120 
     106#if (DEBUG_SYS_FORK & 1 )
    121107if( DEBUG_SYS_FORK < tm_start )
    122108printk("\n[DBG] %s : parent_thread %x selected cluster %x\n",
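
The sys_fork.c hunk above drops the explicit (process_t *) cast on GET_PTR() and removes an ad-hoc scheduler-display debug block. For readers new to the extended-pointer idiom used throughout these files, here is a minimal sketch of the decomposition (illustrative only; the remark about GET_PTR() returning a generic pointer is an inference from the removed cast):

    xptr_t      ref_xp  = parent_process_ptr->ref_xp;    // extended pointer on the reference process
    cxy_t       ref_cxy = GET_CXY( ref_xp );              // cluster-identifier part
    process_t * ref_ptr = GET_PTR( ref_xp );               // local-pointer part; no cast needed if
                                                           // GET_PTR() yields a generic pointer
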
  • trunk/kernel/syscalls/sys_get_config.c

    r438 r440  
    22 * sys_get_config.c - get hardware platform parameters.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737                    uint32_t * ncores )
    3838{
    39     paddr_t   paddr;
    40     uint32_t  k_x_size;
    41     uint32_t  k_y_size;
    42     uint32_t  k_ncores;
    43 
    44         error_t   error = 0;
     39        error_t    error;
     40    vseg_t   * vseg;
     41    uint32_t   k_x_size;
     42    uint32_t   k_y_size;
     43    uint32_t   k_ncores;
    4544
    4645    thread_t  * this    = CURRENT_THREAD;
     
    5655#endif
    5756
    58     // check buffer in user space
    59     error |= vmm_v2p_translate( false , x_size  , &paddr );
    60     error |= vmm_v2p_translate( false , y_size  , &paddr );
    61     error |= vmm_v2p_translate( false , ncores  , &paddr );
     57    // check x_size buffer in user space
     58    error = vmm_get_vseg( process , (intptr_t)x_size  , &vseg );
    6259
    6360        if( error )
     
    6562
    6663#if DEBUG_SYSCALLS_ERROR
    67 printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n",
    68 __FUNCTION__ , this->trdid , process->pid );
      64printk("\n[ERROR] in %s : x_size buffer unmapped %x / thread %x / process %x\n",
     65__FUNCTION__ , (intptr_t)x_size , this->trdid , process->pid );
     66vmm_display( process , false );
    6967#endif
    70         this->errno = EFAULT;
     68        this->errno = EINVAL;
     69                return -1;
     70        }
     71
     72    // check y_size buffer in user space
     73    error = vmm_get_vseg( process , (intptr_t)y_size  , &vseg );
     74
     75        if( error )
     76        {
     77
     78#if DEBUG_SYSCALLS_ERROR
      79printk("\n[ERROR] in %s : y_size buffer unmapped %x / thread %x / process %x\n",
     80__FUNCTION__ , (intptr_t)y_size , this->trdid , process->pid );
     81vmm_display( process , false );
     82#endif
     83        this->errno = EINVAL;
     84                return -1;
     85        }
     86
     87    // check ncores buffer in user space
     88    error = vmm_get_vseg( process , (intptr_t)ncores  , &vseg );
     89
     90        if( error )
     91        {
     92
     93#if DEBUG_SYSCALLS_ERROR
      94printk("\n[ERROR] in %s : ncores buffer unmapped %x / thread %x / process %x\n",
     95__FUNCTION__ , (intptr_t)ncores , this->trdid , process->pid );
     96vmm_display( process , false );
     97#endif
     98        this->errno = EINVAL;
    7199                return -1;
    72100        }
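
The sys_get_config.c hunk is representative of the change applied to most syscalls in this changeset: user pointers are no longer validated with vmm_v2p_translate() but with vmm_get_vseg(), and every failure path reports the faulty address, thread and process under DEBUG_SYSCALLS_ERROR, then dumps the VMM. The helper below does not exist in the source tree; it is a sketch of that recurring pattern, written only with the calls visible in the diff, to make the repetition easier to read:

    // Hypothetical helper: checks that a user virtual address is mapped
    // in the calling process VMM, reporting and setting errno on failure.
    static inline error_t user_vaddr_check( process_t  * process,
                                            intptr_t     vaddr,
                                            const char * func )    // caller's __FUNCTION__
    {
        vseg_t  * vseg;
        error_t   error = vmm_get_vseg( process , vaddr , &vseg );

        if( error )
        {
    #if DEBUG_SYSCALLS_ERROR
    printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
    func , vaddr , CURRENT_THREAD->trdid , process->pid );
    vmm_display( process , false );
    #endif
            CURRENT_THREAD->errno = EINVAL;
        }
        return error;
    }

A syscall could then reduce its repeated checks to calls such as user_vaddr_check( process , (intptr_t)x_size , __FUNCTION__ ).
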
  • trunk/kernel/syscalls/sys_get_core.c

    r410 r440  
    3737                  uint32_t * lid )
    3838{
    39     paddr_t   paddr;
     39        error_t   error;
     40    vseg_t  * vseg;
    4041    uint32_t  k_cxy;
    4142    uint32_t  k_lid;
    42 
    43         error_t   error = 0;
    4443
    4544    thread_t  * this    = CURRENT_THREAD;
    4645    process_t * process = this->process;
    4746
    48     // check buffers in user space
    49     error |= vmm_v2p_translate( false , cxy , &paddr );
    50     error |= vmm_v2p_translate( false , lid , &paddr );
     47    // check cxy buffer in user space
     48    error = vmm_get_vseg( process , (intptr_t)cxy , &vseg );
    5149
    5250        if( error )
    5351        {
    54         printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n",
    55         __FUNCTION__ , this->trdid , process->pid );
     52
     53#if DEBUG_SYSCALLS_ERROR
     54printk("\n[ERROR] in %s : cxy buffer unmapped %x / thread %x / process %x\n",
     55__FUNCTION__ , (intptr_t)cxy , this->trdid , process->pid );
     56vmm_display( process , false );
     57#endif
     58        this->errno = EFAULT;
     59                return -1;
     60        }
     61
     62    // check lid buffer in user space
     63    error = vmm_get_vseg( process , (intptr_t)lid , &vseg );
     64
     65        if( error )
     66        {
     67
     68#if DEBUG_SYSCALLS_ERROR
     69printk("\n[ERROR] in %s : lid buffer unmapped %x / thread %x / process %x\n",
     70__FUNCTION__ , (intptr_t)lid , this->trdid , process->pid );
     71vmm_display( process , false );
     72#endif
    5673        this->errno = EFAULT;
    5774                return -1;
  • trunk/kernel/syscalls/sys_get_cycle.c

    r408 r440  
    22 * sys_get_cycle.c - get calling core cycles count.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3535int sys_get_cycle ( uint64_t * cycle )
    3636{
    37         error_t   error;
    38     paddr_t   paddr;
    39         uint64_t  k_cycle;
     37        error_t     error;
     38    vseg_t    * vseg;
     39        uint64_t    k_cycle;
    4040
    4141    thread_t  * this    = CURRENT_THREAD;
     
    4343
    4444    // check buffer in user space
    45     error = vmm_v2p_translate( false , cycle , &paddr );
     45    error = vmm_get_vseg( process , (intptr_t)cycle , &vseg );
    4646
    4747        if( error )
    4848        {
    49         printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n",
    50         __FUNCTION__ , this->trdid , process->pid );
     49
     50#if DEBUG_SYSCALLS_ERROR
     51printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
     52__FUNCTION__ , (intptr_t)cycle , this->trdid , process->pid );
     53vmm_display( process , false );
     54#endif
    5155        this->errno = EFAULT;
    5256                return -1;
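
sys_get_cycle() still has to hand the 64-bit count back to the caller once the check above has passed; that part lies outside the excerpt. The lines below are therefore only an assumption about the remainder, reusing hal_get_cycles() and hal_copy_to_uspace() (user destination, kernel source, size) as they appear in other hunks of this changeset:

    // assumed remainder of sys_get_cycle(), not shown in the excerpt
    k_cycle = hal_get_cycles();                                  // read the local cycle counter
    hal_copy_to_uspace( cycle , &k_cycle , sizeof(uint64_t) );   // copy the result to user space
    return 0;
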
  • trunk/kernel/syscalls/sys_getcwd.c

    r124 r440  
    22 * sys_getcwd.c - get process current work directory
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    4040{
    4141        error_t    error;
    42     paddr_t    paddr;
     42    vseg_t   * vseg;
    4343    char       kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
    4444 
     
    4949        if( nbytes < CONFIG_VFS_MAX_PATH_LENGTH )
    5050        {
    51         printk("\n[ERROR] in %s : buffer too small\n", __FUNCTION__ );
    52                 this->errno = ERANGE;
     51
     52#if DEBUG_SYSCALLS_ERROR
     53printk("\n[ERROR] in %s : buffer too small / thread %x / process %x\n",
     54__FUNCTION__ , this->trdid , process->pid );
     55#endif
     56                this->errno = EINVAL;
    5357        return -1;
    5458        }
    5559
    5660    // check buffer in user space
    57     error = vmm_v2p_translate( false , buf , &paddr );
     61    error = vmm_get_vseg( process, (intptr_t)buf , &vseg );
    5862
    5963        if( error )
    6064        {
    61         printk("\n[ERROR] in %s : user buffer unmapped\n", __FUNCTION__ );
    62                 this->errno = EFAULT;
     65
     66#if DEBUG_SYSCALLS_ERROR
     67printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
     68__FUNCTION__ , (intptr_t)buf , this->trdid , process->pid );
     69#endif
     70                this->errno = EINVAL;
    6371        return -1;
    6472        }
  • trunk/kernel/syscalls/sys_kill.c

    r438 r440  
    22 * sys_kill.c - Kernel function implementing the "kill" system call.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    3838{
    3939    xptr_t      owner_xp;      // extended pointer on target process in owner cluster
    40     cxy_t       owner_cxy;     // target process owner cluster
      40    cxy_t       owner_cxy;     // owner cluster of the target process
    4141    process_t * owner_ptr;     // local pointer on target process in owner cluster
    42     xptr_t      parent_xp;     // extended pointer on parent process
    43     cxy_t       parent_cxy;    // parent process cluster
    44     process_t * parent_ptr;    // local pointer on parent process
    45     pid_t       ppid;          // parent process PID
    4642    uint32_t    retval;        // return value for the switch
    4743
    4844    thread_t  * this    = CURRENT_THREAD;
    4945    process_t * process = this->process;
    50     trdid_t     trdid   = this->trdid;
    5146
    5247#if DEBUG_SYS_KILL
     
    7570    }
    7671
    77     // process can kill itself only when calling thread is the main thread
    78     if( (pid == process->pid) && ((owner_cxy != local_cxy) || (LTID_FROM_TRDID( trdid ))) )
     72    // process cannot kill itself
     73    if( (pid == process->pid) )
    7974    {
    8075
    8176#if DEBUG_SYSCALLS_ERROR
    82 printk("\n[ERROR] in %s : only main thread can kill itself\n", __FUNCTION__ );
     77printk("\n[ERROR] in %s : process %x cannot kill itself\n", __FUNCTION__, pid );
    8378#endif
    8479        this->errno = EINVAL;
     
    8681    }
    8782
    88     // get parent process PID
    89     parent_xp  = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
    90     parent_cxy = GET_CXY( parent_xp );
    91     parent_ptr = GET_PTR( parent_xp );
    92     ppid       = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
    93 
    94     // check processe INIT
      83    // process INIT cannot be killed
    9584    if( pid == 1 )
    9685    {
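
Two policy changes are visible in the sys_kill.c hunk: a process may no longer signal itself at all (previously only non-main threads were refused), and the parent-PID lookup that supported the old rule is gone. Seen from user space, self-termination must now go through exit() rather than kill(); an illustrative fragment, assuming the C library provides the standard getpid(), kill() and exit() wrappers:

    /* illustrative user-level fragment (assumes <signal.h>, <unistd.h>, <stdlib.h>) */
    if( kill( getpid() , SIGKILL ) != 0 )   /* now refused by the kernel with EINVAL */
    {
        exit( EXIT_FAILURE );               /* the supported self-termination path  */
    }
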
  • trunk/kernel/syscalls/sys_mmap.c

    r438 r440  
    33 *
    44 * Authors       Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *               Alain Greiner (2016,2017)
     5 *               Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4444    xptr_t        mapper_xp;
    4545    error_t       error;
    46     paddr_t       paddr;        // unused, but required for user space checking
    4746    reg_t         save_sr;      // required to enable IRQs
    4847
     
    6059
    6160    // check arguments in user space
    62     error = vmm_v2p_translate( false , attr , &paddr );
     61    error = vmm_get_vseg( process , (intptr_t)attr , &vseg );
    6362
    6463    if ( error )
     
    6665
    6766#if DEBUG_SYSCALLS_ERROR
    68 printk("\n[ERROR] in %s : arguments not in used space = %x\n", __FUNCTION__ , (intptr_t)attr );
     67printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
     68__FUNCTION__ , (intptr_t)attr , this->trdid , process->pid );
     69vmm_display( process , false );
    6970#endif
    7071                this->errno = EINVAL;
     
    9293
    9394#if DEBUG_SYSCALLS_ERROR
    94 printk("\n[ERROR] in %s : MAP_FIXED not supported\n", __FUNCTION__ );
     95printk("\n[ERROR] in %s : MAP_FIXED not supported / thread %x / process %x\n",
     96__FUNCTION__ , this->trdid , process->pid );
    9597#endif
    9698        this->errno = EINVAL;
     
    102104
    103105#if DEBUG_SYSCALLS_ERROR
    104 printk("\n[ERROR] in %s : MAP_SHARED xor MAP_PRIVATE\n", __FUNCTION__ );
     106printk("\n[ERROR] in %s : MAP_SHARED == MAP_PRIVATE / thread %x / process %x\n",
     107__FUNCTION__ , this->trdid , process->pid );
    105108#endif
    106109        this->errno = EINVAL;
     
    124127
    125128#if DEBUG_SYSCALLS_ERROR
    126 printk("\n[ERROR] in %s: bad file descriptor = %d\n", __FUNCTION__ , fdid );
     129printk("\n[ERROR] in %s: bad file descriptor %d / thread %x / process %x\n",
     130__FUNCTION__ , fdid , this->trdid , process->pid );
    127131#endif
    128132            this->errno = EBADFD;
     
    137141
    138142#if DEBUG_SYSCALLS_ERROR
    139 printk("\n[ERROR] in %s: file %d not found\n", __FUNCTION__ , fdid );
     143printk("\n[ERROR] in %s: file %d not found / thread %x / process %x\n",
     144__FUNCTION__ , fdid , this->trdid , process->pid );
    140145#endif
    141146            this->errno = EBADFD;
     
    160165
    161166#if DEBUG_SYSCALLS_ERROR
    162 printk("\n[ERROR] in %s: offset (%d) + len (%d) >= file's size (%d)\n",
    163 __FUNCTION__, k_attr.offset, k_attr.length, size );
     167printk("\n[ERROR] in %s: offset(%d) + len(%d) >= file's size(%d) / thread %x / process %x\n",
     168__FUNCTION__, k_attr.offset, k_attr.length, size, this->trdid, process->pid );
    164169#endif
    165170            this->errno = ERANGE;
     
    173178
    174179#if DEBUG_SYSCALLS_ERROR
    175 printk("\n[ERROR] in %s: prot = %x / file_attr = %x)\n",
    176 __FUNCTION__ , k_attr.prot , file_attr );
     180printk("\n[ERROR] in %s: prot = %x / file_attr = %x / thread %x , process %x\n",
     181__FUNCTION__ , k_attr.prot , file_attr , this->trdid , process->pid );
    177182#endif
    178183                        this->errno = EACCES;
     
    206211
    207212#if DEBUG_SYSCALLS_ERROR
    208 printk("\n[ERROR] in %s : illegal cxy for MAP_REMOTE\n", __FUNCTION__ );
     213printk("\n[ERROR] in %s : illegal cxy for MAP_REMOTE / thread %x / process %x\n",
     214__FUNCTION__, this->trdid , process->pid );
    209215#endif
    210216                this->errno = EINVAL;
     
    255261
    256262#if DEBUG_SYSCALLS_ERROR
    257 printk("\n[ERROR] in %s : cannot create vseg\n", __FUNCTION__ );
     263printk("\n[ERROR] in %s : cannot create vseg / thread %x / process %x\n",
     264__FUNCTION__, this->trdid , process->pid );
    258265#endif
    259266        this->errno = ENOMEM;
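
Every error report in the sys_mmap.c hunk (and in the rest of the changeset) follows the same convention: it is compiled in only when DEBUG_SYSCALLS_ERROR is non-zero, and it always names the faulty value, the calling thread and the process. The macro below is not part of the tree; it is a sketch of that convention as a GNU C variadic macro, relying on the local this and process variables that every syscall defines:

    #if DEBUG_SYSCALLS_ERROR
    #define SYSCALL_ERROR( fmt , ... )                                    \
    printk("\n[ERROR] in %s : " fmt " / thread %x / process %x\n",        \
           __FUNCTION__ , ##__VA_ARGS__ , this->trdid , process->pid )
    #else
    #define SYSCALL_ERROR( fmt , ... )
    #endif

    // usage equivalent to the expanded blocks in the diff:
    SYSCALL_ERROR( "bad file descriptor %d" , fdid );
    this->errno = EBADFD;
    return -1;
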
  • trunk/kernel/syscalls/sys_mutex.c

    r23 r440  
    22 * sys_mutex.c - Access a POSIX mutex.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737               uint32_t   attr )
    3838{
    39         error_t    error;
    40     paddr_t    paddr;
     39        error_t     error;
     40    vseg_t    * vseg;
    4141
    42     thread_t * this = CURRENT_THREAD;
     42    thread_t  * this    = CURRENT_THREAD;
     43    process_t * process = this->process;
    4344
    4445    // check vaddr in user vspace
    45         error = vmm_v2p_translate( false , vaddr , &paddr );
     46        error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );
     47
    4648        if( error )
    4749    {
    48         printk("\n[ERROR] in %s : illegal virtual address = %x\n",
    49                __FUNCTION__ , (intptr_t)vaddr );
     50
     51#if DEBUG_SYSCALLS_ERROR
     52printk("\n[ERROR] in %s : mutex unmapped %x / thread %x / process %x\n",
     53__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     54vmm_display( process , false );
     55#endif
    5056        this->errno = error;
    5157        return -1;
     
    6066            if( attr != 0 )
    6167            {
    62                 printk("\n[ERROR] in %s : mutex attributes non supported yet\n",
    63                        __FUNCTION__ );
     68
     69#if DEBUG_SYSCALLS_ERROR
      70printk("\n[ERROR] in %s : mutex attributes not supported / thread %x / process %x\n",
     71__FUNCTION__ , this->trdid , process->pid );
     72#endif
    6473                this->errno = error;
    6574                return -1;
     
    7079            if( error )
    7180            {
    72                 printk("\n[ERROR] in %s : cannot create mutex\n",
    73                        __FUNCTION__ );
     81
     82#if DEBUG_SYSCALLS_ERROR
     83printk("\n[ERROR] in %s : cannot create mutex / thread %x / process %x\n",
     84__FUNCTION__ , this->trdid , process->pid );
     85#endif
    7486                this->errno = error;
    7587                return -1;
     
    8496            if( mutex_xp == XPTR_NULL )     // user error
    8597            {
    86                 printk("\n[ERROR] in %s : mutex %x not registered\n",
    87                        __FUNCTION__ , (intptr_t)vaddr );
     98
     99#if DEBUG_SYSCALLS_ERROR
     100printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",
     101__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     102#endif
    88103                this->errno = EINVAL;
    89104                return -1;
     
    102117            if( mutex_xp == XPTR_NULL )     // user error
    103118            {
    104                 printk("\n[ERROR] in %s : mutex %x not registered\n",
    105                        __FUNCTION__ , (intptr_t)vaddr );
     119
     120#if DEBUG_SYSCALLS_ERROR
     121printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",
     122__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     123#endif
    106124                this->errno = EINVAL;
    107125                return -1;
     
    120138            if( mutex_xp == XPTR_NULL )     // user error
    121139            {
    122                 printk("\n[ERROR] in %s : mutex %x not registered\n",
    123                        __FUNCTION__ , (intptr_t)vaddr );
     140
     141#if DEBUG_SYSCALLS_ERROR
     142printk("\n[ERROR] in %s : mutex %x not registered / thread %x / process %x\n",
     143__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
     144#endif
    124145                this->errno = EINVAL;
    125146                return -1;
  • trunk/kernel/syscalls/sys_read.c

    r438 r440  
    22 * sys_read.c - read bytes from a file
    33 *
    4  * Author     Alain Greiner (2016,2017)
     4 * Author     Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5858{
    5959    error_t      error;
    60     paddr_t      paddr;       // required for user space checking
     60    vseg_t     * vseg;        // required for user space checking
    6161        xptr_t       file_xp;     // remote file extended pointer
    6262    uint32_t     nbytes;      // number of bytes actually read
     
    9191
    9292    // check user buffer in user space
    93     error = vmm_v2p_translate( false , vaddr , &paddr );
     93    error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );
    9494
    9595    if ( error )
     
    9797
    9898#if DEBUG_SYSCALLS_ERROR
    99 printk("\n[ERROR] in %s : user buffer unmapped = %x\n",
    100 __FUNCTION__ , (intptr_t)vaddr );
     99printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
     100__FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );
     101vmm_display( process , false );
    101102#endif
    102103                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_sem.c

    r23 r440  
     22 * sys_sem.c - Access a POSIX unnamed semaphore.
    33 *
    4  * Authors     Alain Greiner (2016,2017)
     4 * Authors     Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3636             uint32_t     * value )       // pointer on in/out argument
    3737{
    38         uint32_t             data;   
    39         paddr_t              paddr;
    40     error_t              error;
     38        uint32_t         data;   
     39        vseg_t         * vseg;
     40    error_t          error;
    4141
    42     thread_t           * this = CURRENT_THREAD;
     42    thread_t       * this    = CURRENT_THREAD;
     43    process_t      * process = this->process;
    4344
    4445    // check vaddr in user vspace
    45         error = vmm_v2p_translate( false , vaddr , &paddr );
     46        error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );
    4647        if( error )
    4748    {
    48         printk("\n[ERROR] in %s : illegal semaphore virtual address = %x\n",
    49                __FUNCTION__ , (intptr_t)vaddr );
    50         this->errno = error;
     49
     50#if DEBUG_SYSCALLS_ERROR
     51printk("\n[ERROR] in %s : unmapped semaphore %x / thread %x / process %x\n",
     52__FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );
     53vmm_display( process , false );
     54#endif
     55        this->errno = EINVAL;
    5156        return -1;
    5257    }
    5358
    5459    // check value in user vspace
    55         error = vmm_v2p_translate( false , value , &paddr );
     60        error = vmm_get_vseg( process , (intptr_t)value , &vseg );
    5661        if( error )
    5762    {
    58         printk("\n[ERROR] in %s : illegal argument virtual address = %x\n",
    59                __FUNCTION__ , (intptr_t)value );
    60         this->errno = error;
    61         return -1;   
     63
     64#if DEBUG_SYSCALLS_ERROR
     65printk("\n[ERROR] in %s : unmapped value %x / thread %x / process %x\n",
      66__FUNCTION__ , (intptr_t)value, this->trdid, process->pid );
     67vmm_display( process , false );
     68#endif
     69        this->errno = EINVAL;
     70        return -1;
    6271    }
    63    
     72
    6473    // execute requested operation
    6574        switch( operation )
     
    91100            if( sem_xp == XPTR_NULL )     // user error
    92101            {
    93                 printk("\n[ERROR] in %s : semaphore %x not registered\n",
    94                        __FUNCTION__ , (intptr_t)value );
     102
     103#if DEBUG_SYSCALLS_ERROR
     104printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",
     105__FUNCTION__ , (intptr_t)value, this->trdid, process->pid );
     106#endif
    95107                this->errno = EINVAL;
    96108                return -1;
     
    114126            if( sem_xp == XPTR_NULL )     // user error
    115127            {
    116                 printk("\n[ERROR] in %s : semaphore %x not registered\n",
    117                        __FUNCTION__ , (intptr_t)value );
     128
     129#if DEBUG_SYSCALLS_ERROR
     130printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",
     131__FUNCTION__ , (intptr_t)value, this->trdid, process->pid );
     132#endif
    118133                this->errno = EINVAL;
    119134                return -1;
     
    134149            if( sem_xp == XPTR_NULL )     // user error
    135150            {
    136                 printk("\n[ERROR] in %s : semaphore %x not registered\n",
    137                        __FUNCTION__ , (intptr_t)value );
     151
     152#if DEBUG_SYSCALLS_ERROR
     153printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",
     154__FUNCTION__ , (intptr_t)value, this->trdid, process->pid );
     155#endif
    138156                this->errno = EINVAL;
    139157                return -1;
     
    154172            if( sem_xp == XPTR_NULL )     // user error
    155173            {
    156                 printk("\n[ERROR] in %s : semaphore %x not registered\n",
    157                        __FUNCTION__ , (intptr_t)value );
     174
     175#if DEBUG_SYSCALLS_ERROR
     176printk("\n[ERROR] in %s : semaphore %x not registered / thread %x / process %x\n",
     177__FUNCTION__ , (intptr_t)value, this->trdid, process->pid );
     178#endif
    158179                this->errno = EINVAL;
    159180                return -1;
  • trunk/kernel/syscalls/sys_stat.c

    r407 r440  
    22 * sys_stat.c - Return statistics on a file or directory.
    33 *
    4  * Author    Alain Greiner  (2016,2017)
     4 * Author    Alain Greiner  (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737{
    3838    error_t       error;
    39     paddr_t       paddr;
     39    vseg_t      * vseg;         // for user space checking
    4040    struct stat   k_stat;       // kernel space
    4141    xptr_t        file_xp;
     
    4646
    4747    // check stat structure in user space
    48     error = vmm_v2p_translate( false , u_stat , &paddr );
     48    error = vmm_get_vseg( process , (intptr_t)u_stat , &vseg );
    4949
    5050        if( error )
    5151        {
    52         printk("\n[ERROR] in %s : stat structure unmapped  for thread %x in process %x\n",
    53                __FUNCTION__ , this->trdid , process->pid );
     52
      53#if DEBUG_SYSCALLS_ERROR
     54printk("\n[ERROR] in %s : stat structure unmapped %x / thread %x / process %x\n",
     55__FUNCTION__ , (intptr_t)u_stat , this->trdid , process->pid );
     56vmm_display( process , false );
     57#endif
    5458                this->errno = EINVAL;
    5559                return -1;
  • trunk/kernel/syscalls/sys_thread_cancel.c

    r438 r440  
    2323
    2424#include <hal_types.h>
     25#include <hal_irqmask.h>
    2526#include <hal_remote.h>
    2627#include <hal_special.h>
     
    3233int sys_thread_cancel( trdid_t trdid )
    3334{
     35    reg_t        save_sr;       // required to enable IRQs
    3436    xptr_t       target_xp;     // target thread extended pointer
     37    cxy_t        target_cxy;    // target thread cluster identifier
     38    ltid_t       target_ltid;   // target thread local index
     39    cxy_t        owner_cxy;     // process owner cluster identifier
     40    xptr_t       owner_xp;      // extended pointer on owner process
    3541
    3642    // get killer thread pointers
    3743        thread_t   * this    = CURRENT_THREAD;
    3844    process_t  * process = this->process;
     45    pid_t        pid     = process->pid;
    3946
    4047    // get extended pointer on target thread
    41         target_xp  = thread_get_xptr( process->pid , trdid );
     48        target_xp  = thread_get_xptr( pid , trdid );
    4249
    4350    // check target_xp
     
    6168#endif
    6269
    63     // cal the relevant kernel function
    64     thread_kill( target_xp,
    65                  0,           // is_exit
    66                  0 );         // is forced
     70    // get process owner cluster identifier
     71    owner_cxy = CXY_FROM_PID( pid );
     72
     73    // get target thread ltid and cluster
     74    target_cxy  = CXY_FROM_TRDID( trdid );
     75    target_ltid = LTID_FROM_TRDID( trdid );
     76
      77    // If the target thread is the main thread, the whole process must be deleted.
      78    // This requires synchronisation with the parent process.
     79    if( (target_cxy == owner_cxy) && (target_ltid == 0) )
     80    {
     81        // get extended pointer on owner cluster
     82        owner_xp = cluster_get_owner_process_from_pid( pid );
     83
     84        // mark for delete all threads but the main
     85        hal_enable_irq( &save_sr );
     86        process_sigaction( pid , DELETE_ALL_THREADS );
     87        hal_restore_irq( save_sr );
     88
     89        // remove process from TXT list
     90        process_txt_detach( owner_xp );
     91
     92        // block the main thread
     93        thread_block( XPTR( local_cxy ,this ) , THREAD_BLOCKED_GLOBAL );
     94
     95        // atomically update owner process descriptor term_state to ask
     96        // the parent process sys_wait() function to delete the main thread
     97        hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,
     98                              PROCESS_TERM_EXIT );
     99    }
     100    else
     101    {
     102        // block target thread and mark it for delete
     103        thread_delete( target_xp , pid , false );
     104    }
    67105
    68106#if DEBUG_SYS_THREAD_CANCEL
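
sys_thread_cancel() now distinguishes the main thread from any other thread purely from identifiers: the owner cluster comes from the PID, and the target cluster and local index from the trdid. Reduced to a sketch (identifiers as in the diff; the main thread is the thread with local index 0 in the owner cluster):

    cxy_t   owner_cxy   = CXY_FROM_PID( pid );        // cluster owning the process
    cxy_t   target_cxy  = CXY_FROM_TRDID( trdid );    // cluster running the target thread
    ltid_t  target_ltid = LTID_FROM_TRDID( trdid );   // local index of the target thread

    if( (target_cxy == owner_cxy) && (target_ltid == 0) )   // target is the main thread
    {
        // the whole process must go: DELETE_ALL_THREADS, TXT detach,
        // block the main thread, set PROCESS_TERM_EXIT (see the branch above)
    }
    else                                                     // ordinary thread
    {
        thread_delete( target_xp , pid , false );            // mark it for delete
    }
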
  • trunk/kernel/syscalls/sys_thread_create.c

    r438 r440  
    5353        trdid_t          trdid;            // created thread identifier
    5454        process_t      * process;          // pointer on local process descriptor
    55         paddr_t          paddr;            // unused, required by vmm_v2p_translate()
     55        vseg_t         * vseg;             // required for user space checking
    5656    cxy_t            target_cxy;       // target cluster identifier
    5757        error_t          error;
     
    6767tm_start = hal_get_cycles();
    6868if( DEBUG_SYS_THREAD_CREATE < tm_start )
    69 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n"
    70 __FUNCTION__ , parent , process->pid, (uint32_t)tm_start );
    71 #endif
    72 
    73         // check user_attr in user space & copy to kernel space
     69printk("\n[DBG] %s : thread %x (cxy %x) enter / process %x / cycle %d\n",
     70__FUNCTION__, parent, local_cxy, process->pid, (uint32_t)tm_start );
     71#endif
     72
     73    // check trdid buffer in user space
     74    error = vmm_get_vseg( process , (intptr_t)trdid_ptr , &vseg );
     75
     76    if ( error )
     77    {
     78
     79#if DEBUG_SYSCALLS_ERROR
     80printk("\n[ERROR] in %s : trdid buffer unmapped %x / thread %x / process %x\n",
     81__FUNCTION__ , (intptr_t)trdid_ptr, parent->trdid, process->pid );
     82vmm_display( process , false );
     83#endif
     84                parent->errno = EINVAL;
     85                return -1;
     86    }
     87
     88        // check user_attr buffer in user space & copy to kernel space
    7489    if( user_attr != NULL )
    7590    {
    76             error = vmm_v2p_translate( false , user_attr , &paddr );
     91            error = vmm_get_vseg( process , (intptr_t)user_attr , &vseg );
    7792
    7893            if( error )
     
    8095
    8196#if DEBUG_SYSCALLS_ERROR
    82 printk("\n[ERROR] in %s : user_attr unmapped\n", __FUNCTION__ );
     97printk("\n[ERROR] in %s : user_attr buffer unmapped %x / thread %x / process %x\n",
     98__FUNCTION__ , (intptr_t)user_attr , parent->trdid , process->pid );
     99vmm_display( process , false );
    83100#endif
    84101                    parent->errno = EINVAL;
     
    90107
    91108        // check start_func in user space
    92         error = vmm_v2p_translate( false , start_func , &paddr );
    93 
    94         if( error )
    95         {
    96 
    97 #if DEBUG_SYSCALLS_ERROR
    98 printk("\n[ERROR] in %s : start_func unmapped\n", __FUNCTION__ );
    99 #endif
    100                 parent->errno = EINVAL;
    101                 return -1;
    102         }
    103 
    104         // check start_arg in user space
    105         if( start_arg != NULL ) error = vmm_v2p_translate( false , start_arg , &paddr );
    106 
    107         if( error )
    108         {
    109 
    110 #if DEBUG_SYSCALLS_ERROR
    111 printk("\n[ERROR] in %s : start_arg unmapped\n", __FUNCTION__ );
    112 #endif
    113                 parent->errno = EINVAL;
    114                 return -1;
    115         }
    116 
    117         // check / define attributes an target_cxy
     109        error = vmm_get_vseg( process , (intptr_t)start_func , &vseg );
     110
     111    if( error )
     112    {
     113
     114#if DEBUG_SYSCALLS_ERROR
     115printk("\n[ERROR] in %s : start_func unmapped %x / thread %x / process %x\n",
     116__FUNCTION__ , (intptr_t)start_func , parent->trdid , process->pid );
     117vmm_display( process , false );
     118#endif
     119        parent->errno = EINVAL;
     120            return -1;
     121        }
     122
     123        // check start_arg buffer in user space
     124        if( start_arg != NULL )
     125    {
     126        error = vmm_get_vseg( process , (intptr_t)start_arg , &vseg );
     127
     128            if( error )
     129            {
     130
     131#if DEBUG_SYSCALLS_ERROR
     132printk("\n[ERROR] in %s : start_arg buffer unmapped %x / thread %x / process %x\n",
     133__FUNCTION__ , (intptr_t)start_arg , parent->trdid , process->pid );
     134vmm_display( process , false );
     135#endif
     136                    parent->errno = EINVAL;
     137                    return -1;
     138        }
     139        }
     140
     141    // define attributes and target_cxy
    118142    if( user_attr != NULL )                      // user defined attributes
    119143    {
     
    125149
    126150#if DEBUG_SYSCALLS_ERROR
    127 printk("\n[ERROR] in %s : illegal target cluster = %x\n", __FUNCTION__ , kern_attr.cxy );
     151printk("\n[ERROR] in %s : illegal target cluster = %x / thread %x / process %x\n",
     152__FUNCTION__ , kern_attr.cxy, parent->trdid, process->pid );
    128153#endif
    129154                            parent->errno = EINVAL;
     
    175200
    176201#if DEBUG_SYSCALLS_ERROR
    177 printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ );
    178 #endif
    179                 return ENOMEM;
     202printk("\n[ERROR] in %s : cannot create new thread / thread %x / process %x\n",
     203__FUNCTION__ , parent->trdid, process->pid );
     204#endif
     205                parent->errno = ENOMEM;
     206                return -1;
    180207        }
    181208
     
    184211        hal_copy_to_uspace( trdid_ptr , &trdid , sizeof(pthread_t) );
    185212
    186     // register child in parent if required
    187     if( user_attr != NULL )
    188     {
    189             if( (kern_attr.attributes & PT_ATTR_DETACH) == 0 )
    190                 thread_child_parent_link( parent_xp , child_xp );
    191     }
    192 
    193213    // activate new thread
    194214        thread_unblock( child_xp , THREAD_BLOCKED_GLOBAL );
     
    199219tm_end = hal_get_cycles();
    200220if( DEBUG_SYS_THREAD_CREATE < tm_end )
    201 printk("\n[DBG] %s : thread %x created thread %x for process %x in cluster %x / cycle %d\n"
    202 __FUNCTION__, parent, child_ptr, process->pid, target_cxy, (uint32_t)tm_end );
     221printk("\n[DBG] %s : thread %x (cxy %x) created thread %x (cxy %x) / process %x / cycle %d\n",
     222__FUNCTION__, parent, local_cxy, child_ptr, target_cxy, process->pid, (uint32_t)tm_end );
    203223#endif
    204224
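
The sys_thread_create.c hunk checks every user pointer (trdid_ptr, user_attr, start_func, start_arg) before any thread is built, so the later hal_copy_to_uspace() of the new identifier cannot fault, and it no longer links non-detached children to the parent thread. The ordering, reduced to a skeleton (illustrative; errno setting and DEBUG_SYSCALLS_ERROR reports elided):

    // 1. validate all user pointers first
    if( vmm_get_vseg( process , (intptr_t)trdid_ptr , &vseg ) )                return -1;
    if( user_attr  && vmm_get_vseg( process , (intptr_t)user_attr  , &vseg ) ) return -1;
    if(               vmm_get_vseg( process , (intptr_t)start_func , &vseg ) ) return -1;
    if( start_arg  && vmm_get_vseg( process , (intptr_t)start_arg  , &vseg ) ) return -1;

    // 2. create the thread in the selected cluster (not shown in this excerpt)

    // 3. hand the new identifier back to the caller
    hal_copy_to_uspace( trdid_ptr , &trdid , sizeof(pthread_t) );

    // 4. make the child runnable
    thread_unblock( child_xp , THREAD_BLOCKED_GLOBAL );
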
  • trunk/kernel/syscalls/sys_thread_exit.c

    r438 r440  
    22 * sys_thread_exit.c - terminates the execution of calling thread
    33 *
    4  * Authors   Alain Greiner (2016,2017)
     4 * Authors   Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2323
    2424#include <hal_types.h>
     25#include <hal_irqmask.h>
    2526#include <thread.h>
     27#include <process.h>
    2628#include <core.h>
    2729#include <vmm.h>
     
    3234int sys_thread_exit( void * exit_value )
    3335{
    34         thread_t  * this    = CURRENT_THREAD;
    35     process_t * process = this->process;
     36    reg_t       save_sr;    // required to enable IRQs
     37    xptr_t      owner_xp;   // extended pointer on owner process
     38 
     39        thread_t  * this      = CURRENT_THREAD;
     40    trdid_t     trdid     = this->trdid;
     41    process_t * process   = this->process;
     42    pid_t       pid       = process->pid;
     43    cxy_t       owner_cxy = CXY_FROM_PID( pid );
    3644
    3745    // check exit_value argument
     
    4048
    4149#if DEBUG_SYSCALLS_ERROR
    42 printk("\n[ERROR] in %s : exit_value argument must be NULL for thread %x in process %x\n",
    43 __FUNCTION__ , exit_value, this->trdid , process->pid );
     50printk("\n[ERROR] in %s : exit_value argument must be NULL / thread %x in process %x\n",
     51__FUNCTION__ , this , pid );
    4452#endif
    4553        this->errno = EINVAL;
     
    5361if( DEBUG_SYS_THREAD_EXIT < tm_start )
    5462printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
    55 __FUNCTION__ , this, process->pid , (uint32_t)tm_start );
     63__FUNCTION__ , this, pid , (uint32_t)tm_start );
    5664#endif
    5765
    58     // cal the relevant kernel function
    59     thread_kill( XPTR( local_cxy , this ),
    60                  1,           // is_exit
    61                  0 );         // is forced
     66    // If calling thread is the main thread, the process must be deleted.
     67    // This require to delete all process threads and synchronise with parent process
     68    if( (local_cxy == owner_cxy) && (LTID_FROM_TRDID(trdid) == 0) )
     69    {
     70        // get extended pointer on owner cluster
     71        owner_xp = cluster_get_owner_process_from_pid( pid );
     72
     73        // mark for delete all threads but the main
     74        hal_enable_irq( &save_sr );
     75        process_sigaction( pid , DELETE_ALL_THREADS );
     76        hal_restore_irq( save_sr );
     77
     78        // remove process from TXT list
     79        process_txt_detach( owner_xp );
     80
     81        // block the main thread
     82        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
     83
     84        // atomically update owner process descriptor term_state to ask
     85        // the parent process sys_wait() function to delete the main thread
     86        hal_remote_atomic_or( XPTR( local_cxy , &process->term_state ) ,
     87                              PROCESS_TERM_EXIT );
     88    }
     89    else
     90    {
      91        // block the calling thread and mark it for delete
     92        thread_delete( XPTR( local_cxy , this ) , pid , false );
     93    }
    6294
    6395#if DEBUG_SYS_THREAD_EXIT
     
    6597if( DEBUG_SYS_THREAD_EXIT < tm_end )
    6698printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n",
    67 __FUNCTION__, this, this->process->pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     99__FUNCTION__, this, pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
    68100#endif
    69101
    70     // deschedule <=> suicide, because blocked by thread_kill()
     102    // deschedule <=> suicide, because blocked by thread_delete()
    71103    sched_yield( "suicide after thread_exit" );
    72104   
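
As in sys_thread_cancel(), the main-thread case of sys_thread_exit() brackets process_sigaction( pid , DELETE_ALL_THREADS ) with hal_enable_irq() / hal_restore_irq(). The point of the bracket, shown as a sketch, is that IRQs are explicitly enabled while the sigaction runs (it presumably relies on the RPC_PROCESS_SIGACTION mentioned in syscalls.h below to reach other clusters), and the caller's previous mask is then restored:

    reg_t save_sr;                                    // saved status register

    hal_enable_irq( &save_sr );                       // enable IRQs, remember previous state
    process_sigaction( pid , DELETE_ALL_THREADS );    // delete all threads but the main
    hal_restore_irq( save_sr );                       // restore the caller's IRQ state
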
  • trunk/kernel/syscalls/sys_timeofday.c

    r407 r440  
    22 * sys_timeofday.c - Get current time
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737{
    3838        error_t        error;
    39     paddr_t        paddr;
     39    vseg_t       * vseg;
    4040
    4141        uint32_t       tm_s;
     
    5050    if( tz )
    5151    {
    52         printk("\n[ERROR] in %s for thread %x in process %x : tz argument must be NULL\n",
    53                __FUNCTION__ , this->trdid , process->pid );
     52
     53#if DEBUG_SYSCALLS_ERROR
     54printk("\n[ERROR] in %s for thread %x in process %x : tz argument must be NULL\n",
     55__FUNCTION__ , this->trdid , process->pid );
     56#endif
    5457        this->errno = EINVAL;
    5558        return -1;
     
    5760 
    5861    // check tv
    59     error = vmm_v2p_translate( false , tv , &paddr );
     62    error = vmm_get_vseg( process , (intptr_t)tv , &vseg );
    6063
    6164    if( error )
    6265    {
    63         printk("\n[ERROR] in %s for thread %x in process %x : tv argument unmapped\n",
    64         __FUNCTION__ , this->trdid , process->pid );
     66
     67#if DEBUG_SYSCALLS_ERROR
      68printk("\n[ERROR] in %s : user buffer tv unmapped %x / thread %x / process %x\n",
      69__FUNCTION__ , (intptr_t)tv , this->trdid , process->pid );
     70vmm_display( process , false );
     71#endif
    6572        this->errno = EINVAL;
    6673        return -1;
  • trunk/kernel/syscalls/sys_wait.c

    r438 r440  
    22 * sys_wait.c - wait termination or blocking of a child process.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3535{
    3636        error_t     error;
    37     paddr_t     paddr;
     37    vseg_t    * vseg;
    3838    xptr_t      iter_xp;
    3939    xptr_t      child_xp;
     
    5959
    6060    // check status in user space
    61     error = vmm_v2p_translate( false , status , &paddr );
     61    error = vmm_get_vseg( process, (intptr_t)status , &vseg );
    6262
    6363        if( error )
     
    6565
    6666#if DEBUG_SYSCALLS_ERROR
    67 printk("\n[ERROR] in %s : status buffer unmapped for thread %x in process %x\n",
    68 __FUNCTION__ , this->trdid , process->pid );
     67printk("\n[ERROR] in %s : status buffer %x unmapped for thread %x in process %x\n",
     68__FUNCTION__ , (intptr_t)status, this->trdid , process->pid );
     69vmm_display( process , false );
    6970#endif
    7071        this->errno = EINVAL;
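
sys_wait() is the consumer of the term_state word that sys_exit(), sys_thread_exit() and sys_thread_cancel() now update atomically. The reading side is not part of this excerpt; the fragment below only sketches what the handshake implies for the parent, assuming the flag/status layout established above (child_cxy and child_ptr are illustrative names for the child process descriptor):

    // assumed parent-side test in sys_wait(), matching the producer code above
    uint32_t term_state = hal_remote_lw( XPTR( child_cxy , &child_ptr->term_state ) );

    if( term_state & PROCESS_TERM_EXIT )
    {
        uint32_t exit_status = term_state & 0xFF;   // status byte published by sys_exit()
        // ... delete the child's main thread and report exit_status to the caller ...
    }
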
  • trunk/kernel/syscalls/sys_write.c

    r438 r440  
    22 * sys_write.c - write bytes to a file
    33 *
    4  * Author        Alain Greiner (2016,2017)
     4 * Author        Alain Greiner (2016,2017,2018)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3535/* TODO: concurrent user page(s) unmap need to be handled [AG] */
    3636
     37extern uint32_t enter_sys_write;
     38extern uint32_t enter_devfs_write;
     39extern uint32_t enter_txt_write;
     40extern uint32_t enter_chdev_cmd_write;
     41extern uint32_t enter_chdev_server_write;
     42extern uint32_t enter_tty_cmd_write;
     43extern uint32_t enter_tty_isr_write;
     44extern uint32_t exit_tty_isr_write;
     45extern uint32_t exit_tty_cmd_write;
     46extern uint32_t exit_chdev_server_write;
     47extern uint32_t exit_chdev_cmd_write;
     48extern uint32_t exit_txt_write;
     49extern uint32_t exit_devfs_write;
     50extern uint32_t exit_sys_write;
     51
    3752//////////////////////////////////
    3853int sys_write( uint32_t   file_id,
     
    4156{
    4257    error_t      error;
    43     paddr_t      paddr;           // required for user space checking
     58    vseg_t     * vseg;            // required for user space checking
    4459        xptr_t       file_xp;         // remote file extended pointer
    4560    uint32_t     nbytes;          // number of bytes actually written
    4661    reg_t        save_sr;         // required to enable IRQs during syscall
    4762
    48 #if (DEBUG_SYS_WRITE_DEBUG & 1)
    49 enter_sys_read = (uint32_t)tm_start;
     63#if (DEBUG_SYS_WRITE & 1)
     64enter_sys_write = (uint32_t)tm_start;
    5065#endif
    5166
     
    7489
    7590    // check user buffer in user space
    76     error = vmm_v2p_translate( false , vaddr , &paddr );
     91    error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );
    7792
    7893    if ( error )
     
    8095
    8196#if DEBUG_SYSCALLS_ERROR
    82 printk("\n[ERROR] in %s : user buffer unmapped = %x\n", __FUNCTION__ , (intptr_t)vaddr );
     97printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
     98__FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid );
     99vmm_display( process , false );
    83100#endif
    84101                this->errno = EINVAL;
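
The block of extern uint32_t enter_*/exit_* symbols added at the top of sys_write.c looks like a lightweight tracing aid: each layer of the write path (syscall, devfs, TXT, chdev client and server, TTY command and ISR) appears to record the cycle at which it is entered and left. Only the syscall-entry probe is visible in this hunk; the sketch below shows how such a pair would be used (the counters themselves are defined elsewhere in the tree):

    #if (DEBUG_SYS_WRITE & 1)
    enter_sys_write = (uint32_t)hal_get_cycles();   // timestamp on entry to sys_write()
    #endif

        /* ... body of the syscall ... */

    #if (DEBUG_SYS_WRITE & 1)
    exit_sys_write  = (uint32_t)hal_get_cycles();   // timestamp just before returning
    #endif

Comparing the recorded values then localises where the cycles of a slow write are spent.
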
  • trunk/kernel/syscalls/syscalls.h

    r437 r440  
    169169/******************************************************************************************
     170170 * [10] This function implements the exit system call, terminating a POSIX process.
     171 * It can be called by any thread running in any cluster.
      172 * It uses both remote accesses to access the owner process descriptor, and the
      173 * RPC_PROCESS_SIGACTION to delete remote process and thread descriptors.
     171174 * In the present implementation, this function actually implements _exit():
    172  * - it does not flush open ourput steams.
     175 * - it does not flush open output streams.
    173176 * - it does not close open streams.
    174177 ******************************************************************************************
     
    619622int sys_fg( pid_t   pid );
    620623
     624int sys_place( uint32_t cxy,
     625               uint32_t lid );
    621626
    622627#endif  // _SYSCALLS_H_
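
The updated comment for syscall [10] states that exit() can now be called by any thread of the process, running in any cluster. A minimal user-level illustration of what this permits, assuming the distribution's C library provides the standard pthread and exit() wrappers:

    /* illustrative user program: a worker thread terminates the whole process */
    #include <pthread.h>
    #include <stdlib.h>

    static void * worker( void * arg )
    {
        /* ... fatal condition detected ... */
        exit( 2 );      /* legal from a non-main thread: the other threads are deleted,
                           the main thread is blocked, and the parent's wait() observes
                           PROCESS_TERM_EXIT with status byte 2 */
        return NULL;    /* not reached */
    }

    int main( void )
    {
        pthread_t trdid;
        pthread_create( &trdid , NULL , worker , NULL );
        pthread_join( trdid , NULL );   /* does not return: the process exits first */
        return 0;
    }
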