Changeset 683


Timestamp:
Jan 13, 2021, 12:36:17 AM
Author:
alain
Message:

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

Location:
trunk/kernel
Files:
1 deleted
75 edited

  • trunk/kernel/Makefile

    r675 r683  
    107107              build/mm/page.o               \
    108108              build/mm/kcm.o                \
    109               build/mm/khm.o                \
    110109              build/mm/mapper.o             \
    111110              build/mm/kmem.o
     
    179178              build/syscalls/sys_wait.o
    180179
    181 SYS_OBJS_4  = build/syscalls/sys_get_config.o      \
    182               build/syscalls/sys_get_core_id.o     \
    183               build/syscalls/sys_get_cycle.o       \
     180SYS_OBJS_4  = build/syscalls/sys_get.o             \
    184181              build/syscalls/sys_display.o         \
    185182              build/syscalls/sys_place_fork.o      \
     
    188185              build/syscalls/sys_trace.o           \
    189186              build/syscalls/sys_fg.o              \
    190               build/syscalls/sys_is_fg.o
     187              build/syscalls/sys_is_fg.o           \
     188              build/syscalls/sys_fbf.o
    191189
    192190SYS_OBJS_5  = build/syscalls/sys_exit.o            \
    193191              build/syscalls/sys_sync.o            \
    194192              build/syscalls/sys_fsync.o           \
    195               build/syscalls/sys_get_best_core.o   \
    196               build/syscalls/sys_get_nb_cores.o    \
    197               build/syscalls/sys_get_thread_info.o \
    198               build/syscalls/sys_fbf.o             \
    199193              build/syscalls/sys_socket.o
    200194
  • trunk/kernel/devices/dev_fbf.c

    r674 r683  
    159159                                intptr_t * user_buffer )
    160160{
    161     kmem_req_t     req;
    162161    fbf_window_t * window;      // window descriptor (created in local cluster)
    163162    vseg_t       * vseg;        // vseg descriptor (created in reference cluster)
     
    202201 
    203202    // allocate memory for the window descriptor in local cluster
    204     req.type   = KMEM_KCM;
    205     req.order  = bits_log2( sizeof(fbf_window_t) );
    206     req.flags  = AF_ZERO | AF_KERNEL;
    207     window     = kmem_alloc( &req );
     203    window  = kmem_alloc( bits_log2(sizeof(fbf_window_t)) , AF_ZERO );
    208204
    209205    if( window == NULL )
     
    256252        printk("\n[ERROR] in %s / thread[%x,%x] cannot create vseg in reference cluster\n",
    257253        __FUNCTION__, process->pid, this->trdid );
    258         req.ptr = (void *)window;
    259         kmem_free( &req );
     254        kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
    260255        return -1;
    261256    }
     
    281276        printk("\n[ERROR] in %s / thread[%x,%x] cannot allocate buffer for window\n",
    282277        __FUNCTION__, process->pid, this->trdid );
    283         req.ptr = (void *)window;
    284         kmem_free( &req );
     278        kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
    285279        vmm_remove_vseg( process , vseg );
    286280        return -1;
     
    521515error_t dev_fbf_delete_window( uint32_t  wid )
    522516{
    523     kmem_req_t     req;
    524 
    525517    thread_t  * this    = CURRENT_THREAD;
    526518    process_t * process = this->process;
     
    581573 
    582574    // 8. release memory allocated for window descriptor
    583     req.type = KMEM_KCM;
    584     req.ptr  = window_ptr;
    585     kmem_remote_free( window_cxy , &req );
     575    kmem_remote_free( window_cxy , window_ptr , bits_log2(sizeof(fbf_window_t)) );
    586576
    587577    // 9. release the associated vseg
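    The dev_fbf.c hunks above also document the simplified kernel memory allocator interface used throughout this changeset: the kmem_req_t request descriptor disappears, and the caller passes the size order and the allocation flags directly, then passes the same order back when releasing the block. The following is a minimal sketch of the new calling convention, assuming only the types and functions visible in the hunks above (fbf_window_t, bits_log2(), AF_ZERO, kmem_alloc(), kmem_free(), kmem_remote_free()); the exact prototypes are inferred from these call sites, not from kmem.h:

        // allocate a zeroed window descriptor in the local cluster
        fbf_window_t * window = kmem_alloc( bits_log2( sizeof(fbf_window_t) ) , AF_ZERO );
        if( window == NULL ) return -1;                       // as in dev_fbf_create_window()

        // ... use the descriptor ...

        // release it, giving back the same order used for the allocation
        kmem_free( window , bits_log2( sizeof(fbf_window_t) ) );

        // a descriptor located in another cluster is released with the remote variant
        kmem_remote_free( window_cxy , window_ptr , bits_log2( sizeof(fbf_window_t) ) );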
  • trunk/kernel/devices/dev_nic.c

    r674 r683  
    1 
    21/*
    32 * dev_nic.c - NIC (Network Controller) generic device API implementation.
     
    4645void dev_nic_init( chdev_t * chdev )
    4746{
     47
     48assert( __FUNCTION__ , (chdev->func == DEV_FUNC_NIC) ,
     49"bad func value");
     50
    4851    thread_t * new_thread;
    4952    error_t    error;
     
    7477
    7578    // build pointer on server function
    76     void * func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server;
     79    void * server_func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server;
    7780
    7881    // create server thread
    7982    error = thread_kernel_create( &new_thread,
    8083                                  THREAD_DEV,
    81                                   func,
     84                                  server_func,
    8285                                  chdev,
    8386                                  lid );
     
    120123    thread_t * this = CURRENT_THREAD;
    121124   
    122     xptr_t dev_xp = chdev_dir.nic_tx[0];
     125    // get cluster and local pointer on the nic_tx[0] chdev
     126    xptr_t    dev_xp  = chdev_dir.nic_tx[0];
    123127    chdev_t * dev_ptr = GET_PTR( dev_xp );
    124 
    125     if( dev_xp == XPTR_NULL ) return -1;
     128    cxy_t     dev_cxy = GET_CXY( dev_xp );
     129
     130    if( dev_xp == XPTR_NULL )
     131    {
     132
     133#if DEBUG_DEV_NIC_ERROR
     134printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n",
     135__FUNCTION__, local_cxy );
     136#endif
     137        return -1;
     138    }
    126139   
    127140    // set command arguments in client thread descriptor
     
    131144    this->nic_cmd.type   = NIC_CMD_GET_KEY;
    132145
     146    // get cmd function pointer from nic_tx[0] chdev descriptor
     147    dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ));
     148   
    133149    // call driver
    134     dev_ptr->cmd( XPTR( local_cxy , this ) );
    135 
    136     // get "status"
     150    cmd( XPTR( local_cxy , this ) );
     151
     152    // return command status
    137153    return this->nic_cmd.status;
    138 }
     154
     155}  // end dev_nic_get_key()
    139156
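    The pattern introduced in dev_nic_get_key() above, and repeated in the other NIC access functions below, no longer dereferences dev_ptr->cmd directly: presumably because the chdev descriptor can live in a remote cluster, the cmd function pointer is first read through hal_remote_lpt() on an extended pointer, then called locally. A condensed sketch of this access sequence, using only the functions and macros that appear in the hunk above:

        // get cluster and local pointer on the target chdev
        xptr_t      dev_xp  = chdev_dir.nic_tx[0];
        chdev_t   * dev_ptr = GET_PTR( dev_xp );
        cxy_t       dev_cxy = GET_CXY( dev_xp );

        // read the driver command function pointer from the (possibly remote) descriptor
        dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) );

        // call the driver, passing an extended pointer on the client thread
        cmd( XPTR( local_cxy , CURRENT_THREAD ) );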
    140157//////////////////////////////////////////
     
    146163    if( channel >= LOCAL_CLUSTER->nb_nic_channels ) return -1;
    147164
    148     xptr_t    dev_xp  = chdev_dir.nic_tx[0];
     165    // get cluster and local pointer on the nic_tx[channel] chdev
     166    xptr_t    dev_xp  = chdev_dir.nic_tx[channel];
    149167    chdev_t * dev_ptr = GET_PTR( dev_xp );
    150 
    151     if( dev_xp == XPTR_NULL ) return -1;
     168    cxy_t     dev_cxy = GET_CXY( dev_xp );
     169
     170    if( dev_xp == XPTR_NULL )
     171    {
     172
     173#if DEBUG_DEV_NIC_ERROR
     174printk("\n[ERROR] in %s : nic_tx[%d] chdev undefined in chdev_dir of cluster %x\n",
     175__FUNCTION__, channel, local_cxy );
     176#endif
     177        return -1;
     178    }
    152179   
    153180    // set command arguments in client thread descriptor
     
    157184    this->nic_cmd.status = run;
    158185
     186    // get cmd function pointer from nic_tx[channel] chdev descriptor
     187    dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ));
     188   
    159189    // call driver
    160     dev_ptr->cmd( XPTR( local_cxy , this ) );
     190    cmd( XPTR( local_cxy , this ) );
    161191
    162192    // return "error"
    163193    return this->nic_cmd.error;
    164 }
     194
     195}  // end dev_nic_set_run()
    165196
    166197//////////////////////////////////
     
    169200    thread_t * this = CURRENT_THREAD;
    170201
     202    // get cluster and local pointer on the nic_tx[0] chdev
    171203    xptr_t    dev_xp  = chdev_dir.nic_tx[0];
    172204    chdev_t * dev_ptr = GET_PTR( dev_xp );
     205    cxy_t     dev_cxy = GET_CXY( dev_xp );
    173206   
    174     if( dev_xp == XPTR_NULL ) return -1;
     207    if( dev_xp == XPTR_NULL )
     208    {
     209
     210#if DEBUG_DEV_NIC_ERROR
     211printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n",
     212__FUNCTION__, local_cxy );
     213#endif
     214        return -1;
     215    }
    175216   
    176217    // set command arguments in client thread descriptor
     
    178219    this->nic_cmd.type   = NIC_CMD_GET_INSTRU;
    179220
     221    // get cmd function pointer from nic_tx[0] chdev descriptor
     222    dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ));
     223   
    180224    // call driver
    181     dev_ptr->cmd( XPTR( local_cxy , this ) );
     225    cmd( XPTR( local_cxy , this ) );
    182226
    183227    // return "error"
    184228    return this->nic_cmd.error;
    185 }
     229
     230}  // end dev_nic_get_instru()
     231
    186232
    187233////////////////////////////////////
     
    190236    thread_t * this = CURRENT_THREAD;
    191237
     238    // get cluster and local pointer on the nic_tx[0] chdev
    192239    xptr_t    dev_xp  = chdev_dir.nic_tx[0];
    193240    chdev_t * dev_ptr = GET_PTR( dev_xp );
     241    cxy_t     dev_cxy = GET_CXY( dev_xp );
    194242   
    195     if( dev_xp == XPTR_NULL ) return -1;
     243    if( dev_xp == XPTR_NULL )
     244    {
     245
     246#if DEBUG_DEV_NIC_ERROR
     247printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n",
     248__FUNCTION__, local_cxy );
     249#endif
     250        return -1;
     251    }
    196252   
    197253    // set command arguments in client thread descriptor
     
    199255    this->nic_cmd.type   = NIC_CMD_GET_INSTRU;
    200256
     257    // get cmd function pointer from nic_tx[0] chdev descriptor
     258    dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ));
     259   
    201260    // call driver
    202     dev_ptr->cmd( XPTR( local_cxy , this ) );
     261    cmd( XPTR( local_cxy , this ) );
    203262
    204263    // return "error"
    205264    return this->nic_cmd.error;
    206 }
     265
     266}  // end dev_nic_clear_instru()
    207267
    208268
     
    261321
    262322////////////////////////////////////////////////////////////////////////////////////////
    263 // This static function computes the checksum for an UDP packet defined by
    264 // the <buffer> and <size> arguments.
     323// This static function computes the checksum for a TCP segment or a UDP packet,
     324// defined by the <buffer> and <length> arguments.
     325// It includes the "pseudo header" defined by the <src_ip_addr>, <dst_ip_addr>, and
     326// <length> arguments, and by the UDP/TCP protocol code selected by <is_tcp>.
    265327////////////////////////////////////////////////////////////////////////////////////////
    266 // @ buffer      : [in] pointer on UDP packet base.
    267 // @ size        : [in] number of bytes in this packet (including header).
     328// @ buffer      : [in] pointer on buffer containing the TCP segment or UDP packet.
     329// @ length      : [in] number of bytes in this packet/segment (including header).
     330// @ src_ip_addr : [in] source IP address (pseudo header).
     331// @ dst_ip_addr : [in] destination IP address (pseudo header).
     332// @ is_tcp      : [in] TCP if true / UDP if false (pseudo header).
    268333// @ return the checksum value on 16 bits
    269334////////////////////////////////////////////////////////////////////////////////////////
    270 static uint16_t dev_nic_udp_checksum( uint8_t  * buffer,
    271                                       uint32_t   size )
    272 {
    273     uint32_t   i;           
    274     uint32_t   carry;
    275     uint32_t   cs;      // 32 bits accumulator
    276     uint16_t * buf;     
    277     uint32_t   max;     // number of uint16_t in packet
    278    
    279     // compute max & buf
    280     buf = (uint16_t *)buffer;
    281     max = size >> 1;
    282 
    283     // extend buffer[] if required
    284     if( size & 1 )
    285     {
    286         max++;
    287         buffer[size] = 0;
    288     }
    289 
    290     // compute checksum for UDP packet
    291     for( i = 0 , cs = 0 ; i < size ; i++ )  cs += buf[i];
    292 
    293     // handle carry
    294     carry = (cs >> 16);
    295     if( carry )
    296     {
    297         cs += carry;
    298         carry = (cs >> 16);
    299         if( carry ) cs += carry;
    300     }
    301 
    302     // one's complement
    303     return ~cs;
    304 }
    305 
    306 ////////////////////////////////////////////////////////////////////////////////////////
    307 // This static function computes the checksum for a TCP segment defined by the <buffer>
    308 // and <size> arguments. It includes the pseudo header defined by the <src_ip_addr>,
    309 // <dst_ip_addr>, <size> arguments, and by the TCP_PROTOCOL code.
    310 ////////////////////////////////////////////////////////////////////////////////////////
    311 // @ buffer      : [in] pointer on TCP segment base.
    312 // @ tcp_length  : [in] number of bytes in this TCP segment (including header).
    313 // @ src_ip_addr : [in] source IP address (pseudo header)
    314 // @ dst_ip_addr : [in] destination IP address (pseudo header)
    315 // @ return the checksum value on 16 bits
    316 ////////////////////////////////////////////////////////////////////////////////////////
    317 static uint16_t dev_nic_tcp_checksum( uint8_t  * buffer,
    318                                       uint32_t   tcp_length,
    319                                       uint32_t   src_ip_addr,
    320                                       uint32_t   dst_ip_addr )
     335static uint16_t dev_nic_tcp_udp_checksum( uint8_t  * buffer,
     336                                          uint32_t   length,
     337                                          uint32_t   src_ip_addr,
     338                                          uint32_t   dst_ip_addr,
     339                                          bool_t     is_tcp )
    321340{
    322341    uint32_t   i;           
     
    324343    uint32_t   cs;      // 32 bits accumulator
    325344    uint16_t * buf;
    326     uint32_t   max;     // number of uint16_t in segment
     345    uint32_t   max;     // number of uint16_t in segment/packet
    327346
    328347    // compute max & buf
    329348    buf = (uint16_t *)buffer;
    330     max = tcp_length >> 1;
     349    max = length >> 1;
    331350
    332351    // extend buffer[] if required
    333     if( tcp_length & 1 )
     352    if( length & 1 )
    334353    {
    335354        max++;
    336         buffer[tcp_length] = 0;
     355        buffer[length] = 0;
    337356    }
    338357
    339358    // compute checksum for TCP segment
    340     for( i = 0 , cs = 0 ; i < tcp_length ; i++ )  cs += buf[i];
     359    for( i = 0 , cs = 0 ; i < max ; i++ )  cs += buf[i];
    341360
    342361    // complete checksum for pseudo-header
    343     cs += src_ip_addr;
    344     cs += dst_ip_addr;
    345     cs += PROTOCOL_TCP;
    346     cs += tcp_length;
     362    cs += (src_ip_addr & 0xFFFF);
     363    cs += (src_ip_addr >> 16 );
     364    cs += (dst_ip_addr & 0xFFFF);
     365    cs += (dst_ip_addr >> 16 );
     366    cs += length;
     367    cs += (is_tcp ? PROTOCOL_TCP : PROTOCOL_UDP);
    347368
    348369    // handle carry
     
    360381
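    For reference, the merged routine above implements the standard one's complement Internet checksum, extended with the UDP/TCP pseudo-header (both IP addresses split into 16-bit halves, the segment length, and the protocol code). Below is a self-contained illustrative sketch of the same logic in plain C; the protocol numbers are the standard IANA values, assumed here rather than taken from the kernel headers, and the buffer must have one spare byte for the odd-length padding, as in the kernel version:

        #include <stdint.h>
        #include <stdbool.h>

        #define PROTO_TCP  6
        #define PROTO_UDP  17

        static uint16_t inet_checksum_pseudo( uint8_t  * buffer,      // segment/packet, header included
                                              uint32_t   length,      // number of bytes in buffer
                                              uint32_t   src_ip_addr,
                                              uint32_t   dst_ip_addr,
                                              bool       is_tcp )
        {
            uint32_t   cs  = 0;                    // 32-bit accumulator
            uint16_t * buf = (uint16_t *)buffer;   // view buffer as 16-bit words
            uint32_t   max = length >> 1;          // number of complete 16-bit words
            uint32_t   i;

            // pad with one null byte when the length is odd
            if( length & 1 ) { buffer[length] = 0; max++; }

            // sum the 16-bit words of the segment/packet
            for( i = 0 ; i < max ; i++ ) cs += buf[i];

            // add the pseudo-header fields
            cs += (src_ip_addr & 0xFFFF);
            cs += (src_ip_addr >> 16);
            cs += (dst_ip_addr & 0xFFFF);
            cs += (dst_ip_addr >> 16);
            cs += length;
            cs += (is_tcp ? PROTO_TCP : PROTO_UDP);

            // fold the carries back into the low 16 bits
            while( cs >> 16 ) cs = (cs & 0xFFFF) + (cs >> 16);

            // one's complement
            return (uint16_t)(~cs);
        }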
    361382///////////////////////////////////////////////////////////////////////////////////////////
    362 // This static function is called by the NIC_TX or NIC_RX server threads to unblock
     383// This static function is called by the NIC_TX and NIC_RX server threads to unblock
    363384// the TX client thread after completion (success or error) of a TX command registered
    364 // in a socket identified by the <socket_xp> argument. The <status> argument defines
    365 // the command success/failure status: a null value signals a success, a non-null value
    366 // signals a failure. For all commands, it copies the status value in the tx_sts field,
    367 // and print an error message on TXT0 in case of failure.
     385// in a socket identified by the <socket_xp> argument.
     386// The <status> argument defines the command success/failure status.
     387// For all commands, it copies the status value in the tx_sts field, and prints an error
     388// message on TXT0 in case of failure.
    368389///////////////////////////////////////////////////////////////////////////////////////////
    369390// @ socket_xp  : [in] extended pointer on socket
     
    377398    cxy_t      socket_cxy = GET_CXY( socket_xp );
    378399
    379     if( status != CMD_STS_SUCCESS )
     400    if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) )
    380401    {
    381402        uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state ));
     
    400421}  // end dev_nic_unblock_tx_client()
    401422
    402 ///////////////////////////////////////////////////////////////////////////////////////////
    403 // This static function is called by the NIC_TX or NIC_RX server threads to unblock
     423
     424///////////////////////////////////////////////////////////////////////////////////////////
     425//               Functions called by the NIC_RX server thread
     426///////////////////////////////////////////////////////////////////////////////////////////
     427
     428///////////////////////////////////////////////////////////////////////////////////////////
     429// This static function is called by the NIC_RX server threads to unblock
    404430// the RX client thread after completion (success or error) of an RX command registered
    405 // in a socket identified by the <socket_xp> argument. The <status> argument defines
    406 // the command success/failure status: a null value signals a success, a non-null value
    407 // signals a failure. For all commands, it copies the status value in the rx_sts field,
    408 // and print an error message on TXT0 in case of failure.
     431// in a socket identified by the <socket_xp> argument.
     432// The <status> argument defines the command success/failure status.
     433// For all commands, it copies the status value in the rx_sts field, and prints an error
     434// message on TXT0 in case of failure.
    409435///////////////////////////////////////////////////////////////////////////////////////////
    410436// @ socket_xp  : [in] extended pointer on socket
     
    418444    cxy_t      socket_cxy = GET_CXY( socket_xp );
    419445
    420     if( status != CMD_STS_SUCCESS )
     446    if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) )
    421447    {
    422448        uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state ));
     
    440466
    441467}  // end dev_nic_unblock_rx_client()
    442 
    443 ///////////////////////////////////////////////////////////////////////////////////////////
    444 //               Functions called by the NIC_RX server thread
    445 ///////////////////////////////////////////////////////////////////////////////////////////
    446468
    447469///////////////////////////////////////////////////////////////////////////////////////////
     
    553575   
    554576    return 0;
    555 }
     577
     578}   // end dev_nic_rx_check_ip()
    556579
    557580///////////////////////////////////////////////////////////////////////////////////////////
     
    595618    xptr_t     socket_rbuf_xp;    // extended pointer on socket rx_buf
    596619    xptr_t     socket_lock_xp;    // extended pointer on socket lock
    597     xptr_t     socket_client_xp;  // extended pointer on socket rx_client field
    598     xptr_t     client_xp;         // extended pointer on client thread descriptor
     620    xptr_t     socket_rx_client;  // socket rx_client thread
     621    bool_t     socket_rx_valid;   // socket rx_command valid
     622    uint32_t   socket_rx_cmd;     // socket rx_command type
    599623    uint32_t   payload;           // number of bytes in payload
    600624    uint32_t   status;            // number of bytes in rx_buf
     
    602626    uint32_t   moved_bytes;       // number of bytes actually moved to rx_buf
    603627
     628#if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR
     629thread_t * this  = CURRENT_THREAD;
     630uint32_t   cycle = (uint32_t)hal_get_cycles();
     631#endif
     632
     633#if DEBUG_DEV_NIC_RX
     634uint32_t   fdid;
     635uint32_t   pid;
     636if( DEBUG_DEV_NIC_RX < cycle )
     637printk("\n[%s] thread[%x,%x] enter / channel %d / plen %d / cycle %d\n",
     638__FUNCTION__, this->process->pid, this->trdid, chdev->channel, k_length, cycle );
     639if( (DEBUG_DEV_NIC_RX < cycle) && (DEBUG_DEV_NIC_RX & 1))
     640putb("64 first bytes in k_buf" , k_buf , 64 );
     641#endif
     642
    604643    // build extended pointers on list of sockets attached to NIC_RX chdev
    605644    root_xp = XPTR( local_cxy , &chdev->wait_root );
    606645    lock_xp = XPTR( local_cxy , &chdev->wait_lock );
    607646
    608     // compute UDP packet checksum
    609     checksum = dev_nic_udp_checksum( k_buf , k_length );
    610 
    611     // get checksum from received packet header
     647    // extract checksum from received UDP packet header
    612648    pkt_checksum = ((uint16_t)k_buf[6] << 8) | (uint16_t)k_buf[7];
    613649
     650    // reset checksum field
     651    k_buf[6] = 0;
     652    k_buf[7] = 0;
     653
     654    // compute checksum from received UDP packet
     655    checksum = dev_nic_tcp_udp_checksum( k_buf,
     656                                         k_length,
     657                                         pkt_src_addr,
     658                                         pkt_dst_addr,
     659                                         false );        // is_not_tcp
    614660    // discard corrupted packet 
    615     if( pkt_checksum != checksum ) return;
     661    if( pkt_checksum != checksum )
     662    {
     663
     664#if DEBUG_DEV_NIC_ERROR
     665printk("\n[WARNING] in %s : thread[%x,%x] discard corrupted packet on channel %d / cycle %d\n"
     666"   expected checksum %x / received checksum %x\n",
     667__FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle,
     668(uint32_t)checksum, (uint32_t)pkt_checksum );
     669#endif
     670        return;
     671    }
    616672   
    617673    // get src_port and dst_port from UDP header
     
    619675    uint32_t pkt_dst_port = ((uint32_t)k_buf[2] << 8) | (uint32_t)k_buf[3];
    620676
    621     // discard unexpected packet
    622     if( xlist_is_empty( root_xp ) ) return;
    623  
    624677    // take the lock protecting the sockets list
    625678    remote_busylock_acquire( lock_xp );
     
    658711        else                                  match_socket = local_match;
    659712
    660         // exit loop when socket found
    661         if( match_socket ) break;
     713        // exit loop if matching
     714        if( match_socket )
     715        {
     716
     717#if DEBUG_DEV_NIC_RX
     718fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) );
     719pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) );
     720if( DEBUG_DEV_NIC_RX < cycle )
     721printk("\n[%s] thread[%x,%x] found matching UDP socket[%d,%d] / state %s\n",
     722__FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
     723#endif
     724            break;
     725        }
    662726    }
    663727
     
    666730
    667731    // discard unexpected packet
    668     if( match_socket == false ) return;
    669    
    670     // build extended pointers on various socket fields
     732    if( match_socket == false )
     733    {
     734
     735#if DEBUG_DEV_NIC_ERROR
     736printk("\n[WARNING] in %s : thread[%x,%x] discard unexpected packet on channel %d / cycle %d\n",
     737__FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle );
     738#endif
     739        return;
     740    }
     741 
     742    // build extended pointers on socket.rx_buf and socket.lock
    671743    socket_rbuf_xp   = XPTR( socket_cxy , &socket_ptr->rx_buf );
    672744    socket_lock_xp   = XPTR( socket_cxy , &socket_ptr->lock );
    673     socket_client_xp = XPTR( socket_cxy , &socket_ptr->rx_client );
    674745
    675746    // take the lock protecting the socket
     
    678749    // get status & space from rx_buf
    679750    status = remote_buf_status( socket_rbuf_xp );
    680     space  = CONFIG_SOCK_RX_BUF_SIZE - status;
    681 
    682     // get client thread
    683     client_xp  = hal_remote_l64( socket_client_xp );
     751    space  = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status;
     752
     753    // get socket rx_client, rx_valid and rx_cmd values
     754    socket_rx_client = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->rx_client ) );
     755    socket_rx_valid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_valid ) );
     756    socket_rx_cmd    = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_cmd ) );
    684757
    685758    // get number of bytes in payload
     
    691764    // move payload from kernel buffer to socket rx_buf
    692765    remote_buf_put_from_kernel( socket_rbuf_xp,
    693                                  k_buf + UDP_HEAD_LEN,
    694                                  moved_bytes );
    695     // unblock client thread
    696     if( client_xp != XPTR_NULL )
    697     {
    698         thread_unblock( client_xp , THREAD_BLOCKED_IO );
     766                                k_buf + UDP_HEAD_LEN,
     767                                moved_bytes );
     768#if DEBUG_DEV_NIC_RX
     769if( DEBUG_DEV_NIC_RX < cycle )
     770printk("\n[%s] thread[%x,%x] for socket[%d,%d] move %d bytes to rx_buf / buf_sts %d\n",
     771__FUNCTION__, this->process->pid, this->trdid, pid, fdid,
     772moved_bytes, remote_buf_status(socket_rbuf_xp) );
     773#endif
     774
     775    // signal client thread if pending RECV command
     776    if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) )
     777    {
     778        // reset rx_valid
     779        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->rx_valid), false );
     780
     781        // report success to RX client thread
     782        dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS );
     783
     784#if DEBUG_DEV_NIC_RX
     785if( DEBUG_DEV_NIC_RX < cycle )
     786printk("\n[%s] thread[%x,%x] for UDP socket[%x,%d] / unblock client thread\n",
     787__FUNCTION__, this->process->pid, this->trdid, pid, fdid );
     788#endif
     789
     790    }
     791    else
     792    {
     793
     794#if DEBUG_DEV_NIC_RX
     795if( DEBUG_DEV_NIC_RX < cycle )
     796printk("\n[%s] thread[%x,%x] for socket[%x,%d] / no client thread\n"
     797"    rx_valid %d / rx_cmd %s\n",
     798__FUNCTION__, this->process->pid, this->trdid, pid, fdid,
     799socket_rx_valid , socket_cmd_type_str(socket_rx_cmd) );
     800#endif
     801
    699802    }
    700803
     
    707810// This static function is called by the dev_nic_rx_server() function to handle one RX
    708811// TCP segment contained in a kernel buffer defined by the <k_buf> & <k_length> arguments.
    709 // The <seg_remote_addr> and <seg_local_addr> arguments are obtained from the received
    710 // IP packet header. It the received segment doesn't match any connected socket attached
    711 // to the selected chdev[k], or any listening socket waiting connection, or if the segment
    712 // is corrupted, this segment is discarded.
    713 // If required by the TCP flags, it registers an R2T request in the socket R2T queue
    714 // to implement the TCP handcheck for close and connect.
     812// The <seg_remote_addr> and <seg_local_addr> arguments have been extracted from the
     813// IP header. The local and remote ports are obtained from the TCP header.
     814// If the received segment doesn't match any connected socket attached to the selected
     815// <chdev>, or any listening socket waiting for connection, or if the segment is corrupted,
     816// the segment is discarded. This function implements the TCP error recovery protocol,
     817// as specified by the RFC. Depending on both the socket state and the segment header:
     818//  - it registers data in the RX buffer,
     819//  - it updates the socket state and TCB,
     820//  - it registers acknowledge requests in the R2T queue,
     821//  - it registers connection requests in the CRQ queue.
    715822///////////////////////////////////////////////////////////////////////////////////////////
    716823// Implementation note:
     
    724831//    the SYN, FIN, ACK and RST flags. It updates the socket state when required, moves
    725832//    data to the rx_buf when possible, and return. It takes the lock protecting the socket,
    726 //    because an connected socket is accessed by both the NIC_TX and NIC_RX server threads.
     833//    because a connected socket is accessed by both the NIC_TX and NIC_RX server threads.
    727834// 4) If no matching connected socket has been found, it scans the list of listening
    728835//    sockets to find a matching listening socket.
     
    760867    bool_t     socket_tx_valid;    // TX command valid
    761868    uint32_t   socket_tx_cmd;      // TX command type
    762     uint32_t   socket_tx_todo;     // number of TX bytes not sent yet
    763869    uint32_t   socket_tx_nxt;      // next byte to send in TX stream
    764870    uint32_t   socket_tx_una;      // first unacknowledged byte in TX stream
     871    uint32_t   socket_tx_len;      // number of bytes in tx_buf
     872    uint32_t   socket_tx_ack;      // number of acknowledged bytes in tx_buf
    765873    bool_t     socket_rx_valid;    // RX command valid
    766874    uint32_t   socket_rx_cmd;      // TX command type
     
    804912    uint32_t seg_data_len = k_length - seg_hlen;  // number of bytes in payload
    805913
    806 #if DEBUG_DEV_NIC_RX
    807 thread_t * this = CURRENT_THREAD;
    808 uint32_t   cycle;
     914    uint32_t seg_data_dup;     // number of duplicated bytes in payload
     915    uint32_t seg_data_new;     // number of new bytes in payload
     916
     917#if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR
    809918uint32_t   fdid;
    810919pid_t      pid;
    811 #endif
    812 
    813 #if DEBUG_DEV_NIC_RX
    814 cycle = (uint32_t)hal_get_cycles();
    815 if( cycle > DEBUG_DEV_NIC_RX )
     920thread_t * this  = CURRENT_THREAD;
     921uint32_t   cycle = (uint32_t)hal_get_cycles();
     922#endif
     923
     924#if DEBUG_DEV_NIC_RX
     925if( DEBUG_DEV_NIC_RX < cycle )
    816926printk("\n[%s] thread[%x,%x] enters / tcp_length %d / tcp_flags %x / cycle %d\n",
    817927__FUNCTION__, this->process->pid, this->trdid, k_length, seg_flags , cycle );
    818928#endif
    819929
    820     // compute and check TCP checksum
     930    // reset checksum field
    821931    k_buf[16] = 0;
    822932    k_buf[17] = 0;
    823     checksum = dev_nic_tcp_checksum( k_buf,
    824                                      k_length,
    825                                      seg_remote_addr,
    826                                      seg_local_addr );
    827 
     933
     934    // compute TCP checksum
     935    checksum = dev_nic_tcp_udp_checksum( k_buf,
     936                                         k_length,
     937                                         seg_remote_addr,
     938                                         seg_local_addr,
     939                                         true );            // is_tcp
    828940    // discard segment if corrupted
    829941    if( seg_checksum != checksum )
    830942    {
    831943
    832 #if DEBUG_DEV_NIC_RX
    833 if( cycle > DEBUG_DEV_NIC_RX )
    834 printk("\n[%s] thread[%x,%x] tcp checksum failure : received %x / computed %x\n",
    835 __FUNCTION__, this->process->pid, this->trdid, seg_checksum, checksum );
     944#if DEBUG_DEV_NIC_ERROR
     945printk("\n[WARNING] in %s : thread[%x,%x] / checksum failure on channel %d / cycle %d\n",
     946__FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle );
    836947#endif
    837948        return;
    838949    }
    839950   
    840     // scan list of attached sockets to find a matching TCP socket
    841     attached_match = false;
    842 
    843951    // build extended pointer on xlist of sockets attached to NIC_RX chdev
    844952    root_xp = XPTR( local_cxy , &chdev->wait_root );
    845953    lock_xp = XPTR( local_cxy , &chdev->wait_lock );
    846954
     955    attached_match = false;
     956
    847957    // take the lock protecting the list of attached sockets
    848958    remote_busylock_acquire( lock_xp );
    849959
     960    // scan list of attached sockets to find a matching TCP socket
    850961    XLIST_FOREACH( root_xp , iter_xp )
    851962    {
     
    878989        {
    879990
    880 #if DEBUG_DEV_NIC_RX
    881 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) );
    882 pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) );
    883 if( cycle > DEBUG_DEV_NIC_RX )
    884 printk("\n[%s] thread[%x,%x] matching attached socket[%d,%d] / state %s\n",
    885 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
     991#if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR
     992fdid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) );
     993pid   = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) );
     994#endif
     995
     996#if DEBUG_DEV_NIC_RX
     997if( DEBUG_DEV_NIC_RX < cycle )
     998printk("\n[%s] matching attached TCP socket[%d,%d] / state %s\n",
     999__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    8861000#endif
    8871001            break;
     
    9121026        socket_tx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_valid ));
    9131027        socket_tx_cmd   = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_cmd ));
    914         socket_tx_todo  = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_todo ));
    9151028        socket_tx_nxt   = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_nxt ));
    9161029        socket_tx_una   = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_una ));
     1030        socket_tx_ack   = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_ack ));
     1031        socket_tx_len   = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_len ));
    9171032
    9181033        socket_rx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->rx_valid ));
     
    9261041        {
    9271042            ////////////////////////
    928             case TCP_STATE_SYN_SENT:  // TCP client waiting for SYN-ACK in connect handshake
     1043            case TCP_STATE_SYN_SENT:  // TCP client waiting for SYN-ACK
    9291044            {
    930                 // [1] check ACK flag
     1045                // [1] & [2] check ACK and RST
    9311046                if( seg_ack_set )
    9321047                {
    933                     if( seg_ack_num != TCP_ISS_CLIENT + 1 )  // bad ACK => report error
     1048                    bool_t ack_ok = (seg_ack_num == (CONFIG_SOCK_ISS_CLIENT + 1) );
     1049
     1050                    if( seg_rst_set && ack_ok )
    9341051                    {
    9351052
    9361053#if DEBUG_DEV_NIC_RX
    937 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n",
    938 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    939 socket_state_str(socket_state), TCP_ISS_CLIENT + 1, seg_ack_num );
    940 #endif
    941                         // make an RST request to R2T queue
     1054if( DEBUG_DEV_NIC_RX < cycle )
     1055printk("\n[%s]  socket[%x,%d] %s RST received from remote TCP => close\n",
     1056__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1057#endif
     1058                        // report RST to local TCP client thread
     1059                        dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );
     1060
     1061                        // update socket state
     1062                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
     1063                                          TCP_STATE_BOUND );
     1064                        break;
     1065                    }
     1066
     1067                    if( seg_rst_set && (ack_ok == false) )
     1068                    {
     1069
     1070#if DEBUG_DEV_NIC_ERROR
     1071printk("\n[ERROR] in %s : socket[%x,%d] %s RST but expect ack_num %x != rcvd %x => discard\n",
     1072__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1073CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num );
     1074#endif
     1075                        break;
     1076                    }
     1077
     1078                    if( (seg_rst_set == false) && (ack_ok == false) )
     1079                    {
     1080
     1081#if DEBUG_DEV_NIC_ERROR
     1082printk("\n[ERROR] in %s : socket[%x,%d] %s expected ack_num %x != rcvd %x => send RST\n",
     1083__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1084CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num );
     1085#endif
     1086                        // send RST to remote TCP
    9421087                        socket_put_r2t_request( socket_r2tq_xp,
    9431088                                                TCP_FLAG_RST,
    9441089                                                chdev->channel );
    945 
    946                         // report error to local TX client thread
    947                         dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK );
    948 
    9491090                        break;
    9501091                    }
    9511092                }
    9521093
    953                 // [2] check RST flag                       // receive RST => report error
    954                 if( seg_rst_set )
    955                 {
    956 
    957 #if DEBUG_DEV_NIC_RX
    958 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n",
    959 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
    960 #endif
    961                     // update socket state
    962                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
    963                                           TCP_STATE_BOUND );
    964 
    965                     // signal error to local TX client thread
    966                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );
    967 
    968                     break;
    969                 }
    970 
    9711094                // [3] handle security & precedence TODO ... someday
    9721095
     
    9761099
    9771100#if DEBUG_DEV_NIC_RX
    978 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected SYN-ACK\n",
    979 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
     1101if( DEBUG_DEV_NIC_RX < cycle )
     1102printk("\n[%s] socket[%x,%d] %s : received expected SYN-ACK\n",
     1103__FUNCTION__, pid, fdid , socket_state_str(socket_state) );
    9801104#endif
    9811105                    // set socket.tx_una 
     
    9991123                    dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
    10001124                }
    1001                 else        // received SYN without ACK => client becomes server
     1125                else        // SYN without ACK => TCP client becomes a TCP server
    10021126                {
    10031127
    10041128#if DEBUG_DEV_NIC_RX
    1005 printk("\n[%s] thread[%x,%x] for socket[%x,%d] %s : received SYN-ACK => become server\n",
    1006 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
     1129if( DEBUG_DEV_NIC_RX < cycle )
     1130printk("\n[%s] socket[%x,%d] %s : received SYN without ACK => send a SYN_ACK\n",
     1131__FUNCTION__, pid, fdid , socket_state_str(socket_state) );
    10071132#endif
    10081133                    // update socket.state
    1009                     hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state), TCP_STATE_SYN_RCVD );
     1134                    hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state),
     1135                                    TCP_STATE_SYN_RCVD );
    10101136
    10111137                    // set socket.tx_nxt
    1012                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER );
     1138                    hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt),
     1139                                    CONFIG_SOCK_ISS_SERVER );
    10131140
    10141141                    // set socket.rx_nxt to seg_seq_num + 1
    10151142                    hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_nxt), seg_seq_num + 1 );
    10161143
    1017                     // make a SYN.ACK request to R2T queue
     1144                    // send SYN.ACK to remote TCP
    10181145                    socket_put_r2t_request( socket_r2tq_xp,
    10191146                                            TCP_FLAG_SYN | TCP_FLAG_ACK,
     
    10211148                }
    10221149                break;
    1023             }
    1024             ////////////////////////
    1025             case TCP_STATE_SYN_RCVD:  // TCP server waiting last ACK in connect handshake
    1026             {
    1027                 // [1] check sequence number
    1028                 if( seg_seq_num != socket_rx_nxt )        // unexpected SEQ_NUM => discard
    1029                 {
    1030 
    1031 #if DEBUG_DEV_NIC_RX
    1032 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect seq_num %x / get %x\n",
    1033 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1034 socket_state_str(socket_state), socket_rx_nxt, seg_seq_num );
    1035 #endif
    1036                     // discard segment without reporting
    1037                     break;
    1038                 }
    1039 
    1040                 // [2] handle RST flag                    // received RST => report error
    1041                 if( seg_rst_set )
    1042                 {
    1043 
    1044 #if DEBUG_DEV_NIC_RX
    1045 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n",
    1046 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
    1047 #endif
    1048                     // update socket state
    1049                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), TCP_STATE_BOUND );
    1050 
    1051                     // report error to local TX client thread
    1052                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );
    1053 
    1054                     break;
    1055                 }
    1056 
    1057                 // [3] handle security & precedence TODO ... someday
    1058 
    1059                 // [4] handle SYN flag
    1060                 if( seg_syn_set )                           // received SYN => discard
    1061                 {
    1062 
    1063 #if DEBUG_DEV_NIC_RX
    1064 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received SYN flag\n",
    1065 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
    1066 #endif
    1067                     // discard segment without reporting
    1068                     break;
    1069                 }
    1070 
    1071                 // [5] handle  ACK flag
    1072                 if( seg_ack_set == false )                      // missing ACK => discard
    1073                 {
    1074 
    1075 #if DEBUG_DEV_NIC_RX
    1076 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK in TCP segment\n",
    1077 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
    1078 #endif
    1079                     // discard segment without reporting
    1080                     break;
    1081                 }
    1082                 else if( seg_ack_num != (TCP_ISS_SERVER + 1) )  // unacceptable ACK
    1083                 {
    1084 
    1085 #if DEBUG_DEV_NIC_RX
    1086 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n",
    1087 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1088 socket_state_str(socket_state), TCP_ISS_SERVER + 1, seg_ack_num );
    1089 #endif
    1090 
    1091                     // register an RST request to R2TQ for remote TCP client
    1092                     socket_put_r2t_request( socket_r2tq_xp,
    1093                                             TCP_FLAG_RST,
    1094                                             chdev->channel );
    1095 
    1096                     // report error to local TX client thread
    1097                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK );
    1098                 }
    1099                 else                                           // acceptable ACK
    1100                 {
    1101 
    1102 #if DEBUG_DEV_NIC_RX
    1103 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected ACK\n",
    1104 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
    1105 #endif
    1106                     // set socket.tx_una 
    1107                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num );
    1108 
    1109                     // update socket.state
    1110                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
    1111                                     TCP_STATE_ESTAB );
    1112 
    1113                     // report success to local TX client thread
    1114                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
    1115                 }
    1116                 break;
    1117             }
    1118             /////////////////////
     1150            }  // end state SYN_SENT
     1151
     1152            ////////////////////////    all "connected" states
     1153            case TCP_STATE_SYN_RCVD:
    11191154            case TCP_STATE_ESTAB:
    11201155            case TCP_STATE_FIN_WAIT1:
     
    11251160            case TCP_STATE_TIME_WAIT:
    11261161            {
    1127                 // [1] check sequence number : out_of_order segments not accepted
    1128                 if( seg_seq_num != socket_rx_nxt )
     1162                // [1] check SEQ_NUM
     1163                // - we accept duplicate segments (i.e. seq_num < rx_nxt)
     1164                // - we don't accept out of order segments (i.e. seq_num > rx_nxt)
     1165                //   => seq_num must be in window [rx_nxt - rx_wnd , rx_nxt]
     1166
     1167                bool_t seq_ok =  is_in_window( seg_seq_num,
     1168                                               (socket_rx_nxt - socket_rx_wnd),
     1169                                               socket_rx_nxt );
     1170
     1171                if( seq_ok == false ) // SEQ_NUM not acceptable
    11291172                {
    1130 
    1131 #if DEBUG_DEV_NIC_RX
    1132 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : illegal SEQ_NUM %x / expected %x\n",
    1133 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1134 socket_state_str(socket_state), seg_seq_num, socket_rx_nxt );
    1135 #endif
    1136                     // discard segment
    1137                     break;
    1138                 }
    1139 
    1140                 // check all bytes in window when the payload exist
    1141                 // TODO : we could accept bytes that are in window,
    1142                 // but this implementation reject all bytes in segment
    1143                 if( seg_data_len > 0 )
    1144                 {
    1145                     // compute min & max acceptable sequence numbers
    1146                     uint32_t seq_min  = socket_rx_nxt;
    1147                     uint32_t seq_max  = socket_rx_nxt + socket_rx_wnd - 1;
    1148 
    1149                     // compute sequence number for last byte in segment
    1150                     uint32_t seg_seq_last = seg_seq_num + seg_data_len - 1;
    1151                      
    1152                     if( is_in_window( seg_seq_last, seq_min, seq_max ) == false )
     1173                    if( seg_rst_set )
    11531174                    {
    11541175
    1155 #if DEBUG_DEV_NIC_RX
    1156 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : last SEQ_NUM %x not in [%x,%x]\n",
    1157 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1158 socket_state_str(socket_state), seg_seq_last, seq_min, seq_max );
    1159 #endif
    1160                         // discard segment
     1176#if DEBUG_DEV_NIC_ERROR
     1177printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x and RST => discard\n",
     1178__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1179socket_rx_nxt, seg_seq_num );
     1180#endif
    11611181                        break;
    11621182                    }
    1163                 }
    1164 
    1165                 // [2] handle RST flag
    1166                 if( seg_rst_set )
     1183                    else  // no RST
     1184                    {
     1185                        // send ACK to remote TCP
     1186                        socket_put_r2t_request( socket_r2tq_xp,
     1187                                                TCP_FLAG_ACK,
     1188                                                chdev->channel );
     1189#if DEBUG_DEV_NIC_ERROR
     1190printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x => ACK and discard\n",
     1191__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1192socket_rx_nxt, seg_seq_num );
     1193#endif
     1194                        break;
     1195                    }
     1196                }
     1197                else                   // SEQ_NUM acceptable
    11671198                {
    1168 
    1169 #if DEBUG_DEV_NIC_RX
    1170 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n",
    1171 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
    1172 #endif
    1173                     if( (socket_state == TCP_STATE_ESTAB     ) ||
    1174                         (socket_state == TCP_STATE_FIN_WAIT1 ) ||
    1175                         (socket_state == TCP_STATE_FIN_WAIT2 ) ||
    1176                         (socket_state == TCP_STATE_CLOSE_WAIT) )
     1199                    // compute number of new bytes & number of duplicated bytes
     1200                    if( seg_seq_num != socket_rx_nxt )  // duplicate segment
    11771201                    {
    1178                         // TODO all pending send & received commands
    1179                         // must receive "reset" responses
    1180 
    1181                         // TODO destroy the socket
     1202                        seg_data_dup = socket_rx_nxt - seg_seq_num;
     1203                        seg_data_new = (seg_data_len > seg_data_dup) ?
     1204                                       (seg_data_len - seg_data_dup) : 0;
     1205                    }
     1206                    else                                // expected segment
     1207                    {
     1208                        seg_data_dup = 0;
     1209                        seg_data_new = seg_data_len;
     1210                    }
     1211                   
     1212#if DEBUG_DEV_NIC_RX
     1213if( DEBUG_DEV_NIC_RX < cycle )
     1214printk("\n[%s] socket[%x,%d] %s seq_num %x / rx_nxt %x / len %d / new %d / dup %d\n",
     1215__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1216seg_seq_num, socket_rx_nxt, seg_data_len, seg_data_new, seg_data_dup );
     1217#endif
     1218                }
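                // [Editor's sketch] is_in_window() is not defined in this changeset.
                // From its use above ([rx_nxt - rx_wnd , rx_nxt]) and in the ACK check
                // below ([tx_una , tx_nxt]), it is presumably a circular 32-bit
                // sequence-window test, which could look like the following
                // (an assumption, not the kernel's actual implementation):

                static inline bool_t is_in_window( uint32_t value,
                                                   uint32_t min,
                                                   uint32_t max )
                {
                    // unsigned wrap-around keeps the test correct even when the
                    // window straddles the 2^32 sequence-number boundary
                    return ( (uint32_t)(value - min) <= (uint32_t)(max - min) );
                }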
     1219
     1220                // [2] handle RST flag (depending on socket state)
     1221                if( seg_rst_set  )
     1222                {
     1223                    if( socket_state == TCP_STATE_SYN_RCVD )
     1224                    {
     1225
     1226#if DEBUG_DEV_NIC_RX
     1227if( DEBUG_DEV_NIC_RX < cycle )
     1228printk("\n[%s] socket[%x,%d] %s RST received from remote TCP => report to user\n",
     1229__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1230#endif
     1231                        // report RST to local TX client thread
     1232                        dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );
     1233
     1234                        // update socket state
     1235                        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1236                                        TCP_STATE_BOUND );
     1237                        break;
     1238                    }
     1239
     1240                    else if( (socket_state == TCP_STATE_ESTAB     ) ||
     1241                             (socket_state == TCP_STATE_FIN_WAIT1 ) ||
     1242                             (socket_state == TCP_STATE_FIN_WAIT2 ) ||
     1243                             (socket_state == TCP_STATE_CLOSE_WAIT) )
     1244                    {
     1245
     1246#if DEBUG_DEV_NIC_RX
     1247if( DEBUG_DEV_NIC_RX < cycle )
     1248printk("\n[%s] socket[%x,%d] %s / received RST flag\n",
     1249__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1250#endif
     1251                        // report RST to local TX client thread
     1252                        if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp,
     1253                                                                         CMD_STS_RST );
     1254                        // report RST to local RX client thread
     1255                        if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp,
     1256                                                                         CMD_STS_RST );
     1257                        // update socket state
     1258                        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1259                                        TCP_STATE_BOUND );
     1260                        break;
    11821261                    }
    11831262                    else  // states CLOSING / LAST_ACK / TIME_WAIT
    11841263                    {
    1185                         // TODO         
     1264                        // update socket state
     1265                        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1266                                        TCP_STATE_BOUND );
     1267                        break;
    11861268                    }
     1269                }
     1270 
     1271                // [3] handle security & precedence TODO ... someday
     1272
     1273                // [4] check SYN 
     1274                if( seg_syn_set )           // received SYN => send RST to remote
     1275                {
     1276
     1277#if DEBUG_DEV_NIC_ERROR
     1278printk("\n[ERROR] in %s socket[%x,%d] %s : received SYN flag => send RST-ACK\n",
     1279__FUNCTION__, pid, fdid , socket_state_str(socket_state) );
     1280#endif
     1281                    // send RST & ACK to remote TCP
     1282                    socket_put_r2t_request( socket_r2tq_xp,
     1283                                            TCP_FLAG_RST | TCP_FLAG_ACK,
     1284                                            chdev->channel );
     1285
     1286                    // report RST to local TX client thread
     1287                    if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp,
     1288                                                                     CMD_STS_RST );
     1289                    // report RST to local RX client thread
     1290                    if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp,
     1291                                                                     CMD_STS_RST );
     1292                    // update socket state
     1293                    hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1294                                    TCP_STATE_BOUND );
    11871295                    break;
    11881296                }
    11891297
    1190                 // [3] handle security & precedence TODO ... someday
    1191 
    1192                 // [4] check SYN flag
    1193                 if( seg_syn_set )                                // received SYN => ERROR
     1298                // [5] handle ACK (depending on socket state)
     1299                if( seg_ack_set == false )           // missing ACK => discard segment
    11941300                {
    11951301
    1196 #if DEBUG_DEV_NIC_RX
    1197 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received unexpected SYN\n",
    1198 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) );
    1199 #endif
    1200                     // TODO signal error to user
    1201 
    1202                     // make an RST request to R2T queue
    1203                     socket_put_r2t_request( socket_r2tq_xp,
    1204                                             TCP_FLAG_RST,
    1205                                             chdev->channel );
    1206 
    1207                     // update socket state
    1208                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), TCP_STATE_BOUND );
    1209 
     1302#if DEBUG_DEV_NIC_ERROR
     1303printk("\n[ERROR] in %s : socket[%x,%d] %s / no ACK in segment => discard\n",
     1304__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1305#endif
    12101306                    break;
    12111307                }
    12121308
    1213                 // [5] check ACK 
    1214                 if( seg_ack_set == false )                           // missing ACK
     1309                // compute acceptable ACK
     1310                bool_t ack_ok = is_in_window( seg_ack_num,
     1311                                              socket_tx_una,
     1312                                              socket_tx_nxt );
     1313
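                // Note : following RFC 793, an ACK is "acceptable" when
                // SND.UNA < SEG.ACK <= SND.NXT (i.e. tx_una < seg_ack_num <= tx_nxt),
                // all comparisons being done modulo 2^32; is_in_window() is assumed
                // to implement this wrap-around-safe test.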
     1314                if( socket_state == TCP_STATE_SYN_RCVD )
    12151315                {
    1216 
    1217 #if DEBUG_DEV_NIC_RX
    1218 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK flag\n",
    1219 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
    1220 #endif
    1221                     // discard segment
    1222                     break;
    1223                 }
    1224                 else if( is_in_window( seg_ack_num,
    1225                                        socket_tx_una,
    1226                                        socket_tx_nxt ) == false )    // unacceptable ACK
    1227                 {
    1228 
    1229 #if DEBUG_DEV_NIC_RX
    1230 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : ACK_NUM %x not in [%x,%x]\n",
    1231 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state),
    1232 seg_ack_num, socket_tx_una, socket_tx_nxt );
    1233 #endif
    1234                     // discard segment
    1235                     break;
    1236                 }
    1237                 else                                                // acceptable ack
    1238                 {
    1239                     // update socket.tx_una
    1240                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num );
    1241 
    1242                     // update socket.tx_wnd 
    1243                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd), seg_window );
    1244 
    1245                     // check last data byte acknowledged for a SEND command
    1246                     if( (socket_tx_todo == 0) &&
    1247                         (seg_ack_num == socket_tx_nxt) &&
    1248                         (socket_tx_cmd == CMD_TX_SEND) )
     1316                    if( ack_ok )                      //  acceptable ACK
    12491317                    {
    1250                         // signal success to TX client thread
     1318
     1319#if DEBUG_DEV_NIC_RX
     1320if( DEBUG_DEV_NIC_RX < cycle )
     1321printk("\n[%s] socket[%x,%d] %s : received expected ACK => update socket\n",
     1322__FUNCTION__, pid, fdid , socket_state_str(socket_state) );
     1323#endif
     1324                        // set socket.tx_una 
     1325                        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num );
     1326
     1327                        // update socket.state
     1328                        hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1329                                        TCP_STATE_ESTAB );
     1330
     1331                        // report success to local TX client thread
    12511332                        dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
    12521333                    }
     1334                    else                               // send RST to remote
     1335                    {
     1336
     1337#if DEBUG_DEV_NIC_ERROR
     1338printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n",
     1339__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1340seg_ack_num, socket_tx_una, socket_tx_nxt );
     1341#endif
     1342                        // send RST & ACK to remote TCP
     1343                        socket_put_r2t_request( socket_r2tq_xp,
     1344                                                TCP_FLAG_RST | TCP_FLAG_ACK,
     1345                                                chdev->channel );
     1346                        break;
     1347                    }
    12531348                }
    1254                    
    1255                 // [7] handle URG flag  TODO ... someday
    1256 
    1257                 // [8] Move DATA to rx_buf / ACK request to R2T queue / unblock rx_client
    1258                 if( seg_data_len )
     1349
     1350                else if( (socket_state == TCP_STATE_ESTAB)      ||
     1351                         (socket_state == TCP_STATE_FIN_WAIT1)  ||
      1352                         (socket_state == TCP_STATE_FIN_WAIT2)  ||
     1354                         (socket_state == TCP_STATE_CLOSE_WAIT) ||
     1355                         (socket_state == TCP_STATE_CLOSING)    )
    12591356                {
    1260                     if( (socket_state == TCP_STATE_ESTAB)     ||
    1261                         (socket_state == TCP_STATE_FIN_WAIT1) ||
    1262                         (socket_state == TCP_STATE_FIN_WAIT2) )
     1357                    if( ack_ok )                      // acceptable ack
     1358                    {
     1359                        // compute number of acknowledged bytes
     1360                        uint32_t ack_bytes =  seg_ack_num - socket_tx_una;
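                        // (unsigned 32-bit subtraction, so the byte count remains
                        // correct even when the sequence number space wraps around)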
     1361
     1362                        if( ack_bytes )  // handle acknowledged bytes
     1363                        {
     1364#if DEBUG_DEV_NIC_RX
     1365if( DEBUG_DEV_NIC_RX < cycle )
     1366printk("\n[%s] socket[%x,%d] %d bytes acknowledged => update socket\n",
     1367__FUNCTION__, pid, fdid, ack_bytes );
     1368#endif
     1369                            // update socket.tx_una, socket.tx_ack, and socket.tx_wnd fields
     1370                            hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una),
     1371                                            seg_ack_num );
     1372                            hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_ack),
     1373                                            socket_tx_ack + ack_bytes );
     1374                            hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd),
     1375                                            seg_window );
     1376
     1377                            //  unblock the TX client thread if last byte acknowledged
     1378                            if( (socket_tx_ack + ack_bytes) == socket_tx_len )
     1379                            {
     1380                                // report success to TX client thread
     1381                                dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
     1382#if DEBUG_DEV_NIC_RX
     1383if( DEBUG_DEV_NIC_RX < cycle )
     1384printk("\n[%s] socket[%x,%d] %s : last ack => unblock TX client thread\n",
     1385__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1386#endif
     1387                            }
     1388                        }
     1389
     1390                        if( socket_state == TCP_STATE_FIN_WAIT1 )
     1391                        {
     1392                            // update socket state
     1393                            hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1394                                            TCP_STATE_FIN_WAIT2 );
     1395                        }
     1396                        if( socket_state == TCP_STATE_FIN_WAIT2 )
     1397                        {
     1398                            // TODO
     1399                        }
     1400                        else if( socket_state == TCP_STATE_CLOSING )
     1401                        {
     1402                            // update socket state
     1403                            hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state),
     1404                                            TCP_STATE_TIME_WAIT );
     1405                        }
      1406                        else if( socket_state == TCP_STATE_CLOSE_WAIT )
     1407                        {
     1408                            // TODO
     1409                        }
     1410                    }
     1411                    else                    // unacceptable ACK => discard segment
     1412                    {
     1413
     1414#if DEBUG_DEV_NIC_ERROR
     1415printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n",
     1416__FUNCTION__, pid, fdid, socket_state_str(socket_state),
     1417seg_ack_num, socket_tx_una, socket_tx_nxt );
     1418#endif
     1419                        break;
     1420                    }
     1421                }
     1422   
     1423                else if( socket_state == TCP_STATE_LAST_ACK )
     1424                {
     1425                    // TODO
     1426                }
     1427
     1428                else if( socket_state == TCP_STATE_TIME_WAIT )
     1429                {
     1430                    // TODO
     1431                }
     1432
     1433                // [6] handle URG flag  TODO ... someday
     1434
     1435                // [7] handle received data : update socket state,
     1436                // move data to rx_buf, register ACK request to R2T queue,
     1437                // unblock the RX client thread in case of pending RX_RECV command
     1438                if((socket_state == TCP_STATE_ESTAB)     ||
     1439                   (socket_state == TCP_STATE_FIN_WAIT1) ||
     1440                   (socket_state == TCP_STATE_FIN_WAIT2) )
     1441                {
      1442                    // register new data bytes, if any
     1443                    if( seg_data_new )
    12631444                    {
    12641445                        // get number of bytes already stored in rx_buf
    12651446                        uint32_t status = remote_buf_status( socket_rx_buf_xp );
    12661447
    1267                         // compute empty space in rx_buf
    1268                         uint32_t space = CONFIG_SOCK_RX_BUF_SIZE - status;
    1269 
    1270                         // compute number of bytes to move : min (space , seg_data_len)
    1271                         uint32_t nbytes = ( space < seg_data_len ) ? space : seg_data_len;
    1272 
    1273                         // move payload from k_buf to rx_buf
     1448                        // compute space in rx_buf and actual number of acceptable bytes
     1449                        // when (space < seg_data_new) the last new bytes are discarded
     1450                        uint32_t space = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status;
     1451                        uint32_t rcv_bytes = (space < seg_data_new) ? space : seg_data_new;
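                        // Example (with an assumed CONFIG_SOCK_RX_BUF_ORDER of 12, i.e. a 4096
                        // bytes rx_buf) : if 4000 bytes are already stored and the segment
                        // carries 200 new bytes, only 96 bytes are accepted; the 104 dropped
                        // bytes are not acknowledged and will be retransmitted by the remote TCP.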
     1452
     1453                        // move new bytes from k_buf to rx_buf
    12741454                        remote_buf_put_from_kernel( socket_rx_buf_xp,
    1275                                                     k_buf + seg_hlen,
    1276                                                     nbytes );
    1277 #if DEBUG_DEV_NIC_RX
    1278 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : move %d bytes to rx_buf\n",
    1279 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1280 socket_state_str(socket_state), nbytes );
    1281 #endif
    1282                         // update socket.rx_nxt
     1455                                                    k_buf + seg_hlen + seg_data_dup,
     1456                                                    rcv_bytes );
     1457#if DEBUG_DEV_NIC_RX
     1458if( DEBUG_DEV_NIC_RX < cycle )
     1459printk("\n[%s] socket[%x,%d] %s : move %d bytes to rx_buf\n",
     1460__FUNCTION__, pid, fdid, socket_state_str(socket_state), rcv_bytes );
     1461 #endif
     1462                        // update socket.rx_nxt and socket_rx_wnd fields
    12831463                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ),
    1284                                               socket_rx_nxt + nbytes );
    1285 
    1286                         // update socket.rx_wnd
     1464                                        socket_rx_nxt + rcv_bytes );
    12871465                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_wnd ),
    1288                                               socket_rx_wnd - nbytes );
    1289 
    1290                         // make an ACK request to R2T queue
     1466                                        socket_rx_wnd - rcv_bytes );
     1467
     1468                        // unblock RX client if required
     1469                        if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) )
     1470                        {
     1471                            // reset rx_valid
     1472                            hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false );
     1473
     1474                            // report success to RX client thread
     1475                            dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS );
     1476#if DEBUG_DEV_NIC_RX
     1477if( DEBUG_DEV_NIC_RX < cycle )
     1478printk("\n[%s] socket[%x,%d] %s : last data => unblock RX client thread\n",
     1479__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1480#endif
     1481                        }
     1482                    }
     1483
     1484                    // make an ACK request to remote
     1485                    socket_put_r2t_request( socket_r2tq_xp,
     1486                                            TCP_FLAG_ACK,
     1487                                            chdev->channel );
     1488                }  // end payload handling
     1489
     1490                // [8] handle FIN flag depending on socket state
     1491                if( (socket_state == TCP_STATE_SYN_RCVD) ||
     1492                    (socket_state == TCP_STATE_ESTAB )   )
     1493                {
     1494                    if( seg_fin_set )     
     1495                    {
     1496
     1497#if DEBUG_DEV_NIC_RX
     1498if( DEBUG_DEV_NIC_RX < cycle )
     1499printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n",
     1500__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1501#endif
     1502                        // update socket.rx_nxt when FIN received
     1503                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ),
     1504                                        socket_rx_nxt + 1 );
     1505
     1506                        // update socket state
     1507                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
     1508                                        TCP_STATE_CLOSE_WAIT );
     1509
     1510                        // send ACK to remote TCP
    12911511                        socket_put_r2t_request( socket_r2tq_xp,
    12921512                                                TCP_FLAG_ACK,
     
    12941514
    12951515                        // check pending RX_RECV command
    1296                         if( (socket_rx_valid == true) &&
    1297                             (socket_rx_cmd == CMD_RX_RECV) )
     1516                        if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) )
    12981517                        {
    12991518                            // reset rx_valid
    13001519                            hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false );
    13011520
    1302                             // report success to RX client thread
    1303                             dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS );
    1304 #if DEBUG_DEV_NIC_RX
    1305 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock waiting RX client thread\n",
    1306 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1307 socket_state_str(socket_state) );
    1308 #endif
    1309                         }
    1310                     }
    1311                 }
    1312 
    1313                 // [9] handle FIN flag
    1314                 if( socket_state == TCP_STATE_ESTAB )
    1315                 {
    1316                     if( seg_fin_set )  // received ACK & FIN   
    1317                     {
    1318 
    1319 #if DEBUG_DEV_NIC_RX
    1320 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n",
    1321 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1322 socket_state_str(socket_state) );
    1323 #endif
    1324                         // update socket.rx_nxt when FIN received
    1325                         hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ),
    1326                                         socket_rx_nxt + 1 );
    1327 
    1328                         // update socket state
    1329                         hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
    1330                                         TCP_STATE_CLOSE_WAIT );
    1331 
    1332                         // make an ACK request to R2T queue
    1333                         socket_put_r2t_request( socket_r2tq_xp,
    1334                                                 TCP_FLAG_ACK,
    1335                                                 chdev->channel );
    1336 
    1337                         // check pending RX_RECV command
    1338                         if( (socket_rx_valid == true) &&
    1339                             (socket_rx_cmd == CMD_RX_RECV) )
    1340                         {
    1341                             // reset rx_valid
    1342                             hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false );
    1343 
    1344                             // report error to RX client thread
     1521                            // report FIN to RX client thread
    13451522                            dev_nic_unblock_rx_client( socket_xp , CMD_STS_EOF );
    13461523#if DEBUG_DEV_NIC_RX
    1347 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock RX client waiting on RECV\n",
    1348 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1349 socket_state_str(socket_state) );
     1524if( DEBUG_DEV_NIC_RX < cycle )
     1525printk("\n[%s] socket[%x,%d] %s : unblock RX client waiting on RECV\n",
     1526__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    13501527#endif
    13511528                        }
     
    13541531                else if( socket_state == TCP_STATE_FIN_WAIT1 )
    13551532                {
    1356                     if( seg_fin_set )  // received ACK & FIN
     1533                    if( seg_fin_set )
    13571534                    {
    13581535
    13591536#if DEBUG_DEV_NIC_RX
    1360 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n",
    1361 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1362 socket_state_str(socket_state) );
    1363 #endif
    1364                         // update socket.rx_nxt when FIN received
     1537if( DEBUG_DEV_NIC_RX < cycle )
     1538printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n",
     1539__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1540#endif
     1541                        // update socket.rx_nxt
    13651542                        hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ),
    13661543                                        socket_rx_nxt + 1 );
     
    13701547                                        TCP_STATE_CLOSING );
    13711548
    1372                         // make an ACK request to R2T queue
     1549                        // send ACK request to remote
    13731550                        socket_put_r2t_request( socket_r2tq_xp,
    13741551                                                TCP_FLAG_ACK,
     
    13791556
    13801557#if DEBUG_DEV_NIC_RX
    1381 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n",
    1382 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1383 socket_state_str(socket_state) );
     1558if( DEBUG_DEV_NIC_RX < cycle )
     1559printk("\n[%s] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n",
     1560__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    13841561#endif
    13851562                        // update socket state
     
    13941571
    13951572#if DEBUG_DEV_NIC_RX
    1396 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n",
    1397 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    1398 socket_state_str(socket_state) );
     1573if( DEBUG_DEV_NIC_RX < cycle )
     1574printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n",
     1575__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    13991576#endif
    14001577                        // update socket.rx_nxt when FIN received
     
    14381615                    dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
    14391616                }
    1440             }  // end case connecteded states
     1617            }  // end case connected states
    14411618        }  // end switch socket state
    14421619
     
    14911668        {
    14921669
    1493 #if DEBUG_DEV_NIC_RX
    1494 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) );
    1495 pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) );
    1496 if( cycle > DEBUG_DEV_NIC_RX )
    1497 printk("\n[%s] thread[%x,%x] matching listening socket[%d,%d] / state %s\n",
    1498 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );
     1670#if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR
     1671fdid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) );
     1672pid   = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) );
     1673#endif
     1674
     1675#if DEBUG_DEV_NIC_RX
     1676if( DEBUG_DEV_NIC_RX < cycle )
     1677printk("\n[%s] matching listening socket[%d,%d] / state %s\n",
     1678__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    14991679#endif
    15001680            break;
     
    15091689    {
    15101690        // The actions depend on the received segment flags
    1511         // - discard segment for RST or ACK
    1512         // - update socket state & remote IP address,
    1513         //   register connect request in socket CRQ queue,
    1514         //   and unblock client thread for SYN
     1691        // - discard segment for RST or ACK,
     1692        // - for SYN, register the connect request in listening socket CRQ queue,
     1693        //   and  unblock the client thread in case of pending RX_ACCEPT command.
    15151694 
    1516         // discard segment if RST flag
    1517         if( seg_rst_set )
     1695        // [1] check RST
     1696        if( seg_rst_set )       // discard segment
    15181697        {
    15191698
    1520 #if DEBUG_DEV_NIC_RX
    1521 if( cycle > DEBUG_DEV_NIC_RX )
    1522 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received RST\n",
    1523 __FUNCTION__, this->process->pid, this->trdid, pid, fdid );
     1699#if DEBUG_DEV_NIC_ERROR
     1700printk("\n[ERROR] in %s : socket[%x,%d] %s / received RST => discard segment\n",
     1701__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
    15241702#endif
    15251703            return;
    15261704        }
    15271705
    1528         // discard segment if ACK flag
    1529         if( seg_ack_set )
     1706        // [2] check ACK
     1707        if( seg_ack_set )    // send RST to remote
    15301708        {
    15311709
    1532 #if DEBUG_DEV_NIC_RX
    1533 if( cycle > DEBUG_DEV_NIC_RX )
    1534 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received ACK\n",
    1535 __FUNCTION__, this->process->pid, this->trdid, pid, fdid );
    1536 #endif
     1710#if DEBUG_DEV_NIC_ERROR
     1711printk("\n[ERROR] in %s : socket[%x,%d] %s received ACK => send RST & discard \n",
     1712__FUNCTION__, pid, fdid, socket_state_str(socket_state) );
     1713#endif
     1714            // make an RST request to R2T queue
     1715            socket_put_r2t_request( socket_r2tq_xp,
     1716                                    TCP_FLAG_RST,
     1717                                    chdev->channel );
    15371718            return;
    15381719        }
    15391720
    1540         // SYN flag == CONNECT request / seq_num cannot be wrong
     1721        // [3] handle security & precedence TODO ... someday
     1722
     1723        // handle SYN == CONNECT request
    15411724        if( seg_syn_set )
    15421725        {
    1543             // build extended pointer on listening socket CRQ
     1726            // build extended pointers on various listening socket fields
     1727            socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock );
    15441728            socket_crqq_xp = XPTR( socket_cxy , &socket_ptr->crqq );
     1729            socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq );
     1730
     1731            // take the lock protecting the matching socket
     1732            remote_queuelock_acquire( socket_lock_xp );
    15451733
    15461734            // try to register request into CRQ queue
     
    15501738                                            seg_seq_num,
    15511739                                            seg_window );
    1552 
    15531740            if ( error )   // CRQ full
    15541741            {
    15551742
    1556 #if DEBUG_DEV_NIC_RX
    1557 if( cycle > DEBUG_DEV_NIC_RX )
    1558 printk("\n[%s] thread[%x,%x] listening socket[%x,%d] CRQ full => send RST\n",
    1559 __FUNCTION__, this->process->pid, this->trdid, pid, fdid );
     1743#if DEBUG_DEV_NIC_ERROR
      1744printk("\n[ERROR] in %s : listening socket[%x,%d] received SYN but CRQ full => send RST\n",
      1745__FUNCTION__, pid, fdid );
    15601746#endif
    15611747                // make an RST request to R2T queue
     
    15641750                                        chdev->channel );
    15651751            }
    1566             else          // new connection request registered in CRQ
     1752            else          // register request in listening socket CRQ
    15671753            {
    15681754
    15691755#if DEBUG_DEV_NIC_RX
      1756if( DEBUG_DEV_NIC_RX < cycle )
    1571 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : register request in CRQ\n",
    1572 __FUNCTION__, this->process->pid, this->trdid, pid, fdid );
    1573 #endif
    1574                 // check pending RX_ACCEPT command
    1575                 if( (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_valid)) == true) &&
    1576                     (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_cmd)) == CMD_RX_ACCEPT) )
     1758printk("\n[%s] listening socket[%x,%d] register request in CRQ\n",
     1759__FUNCTION__, pid, fdid );
     1760#endif
     1761                bool_t   rx_valid = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_valid));
     1762                uint32_t rx_cmd   = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_cmd));
     1763               
     1764                // check pending ACCEPT command
     1765                if( rx_valid && (rx_cmd == CMD_RX_ACCEPT) )
    15771766                {
    15781767                    // reset rx_valid
    15791768                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_valid ), false );
    15801769                     
    1581                     // report success to RX client thread
     1770                    // report success to RX client thread, that will
     1771                    // create a new socket and request a SYN-ACK to TX server thread
    15821772                    dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS );
    15831773
    15841774#if DEBUG_DEV_NIC_RX
      1775if( DEBUG_DEV_NIC_RX < cycle )
    1586 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] unblock RX client thread\n",
    1587 __FUNCTION__, this->process->pid, this->trdid, pid, fdid );
     1777printk("\n[%s] listening socket[%x,%d] unblock RX client thread\n",
      1778__FUNCTION__, pid, fdid );
    15881779#endif
    15891780                }
    15901781            }   // end register request in CRQ
     1782
     1783            // release the lock protecting the matching socket
     1784            remote_queuelock_release( socket_lock_xp );
     1785
    15911786        }   // end if SYN
    1592        
     1787
    15931788        return;
    15941789
    15951790    }  // end if listening_match
    15961791
    1597     // 6. no socket found => discard segment
    1598 
    1599 #if DEBUG_DEV_NIC_RX
    1600 if( cycle > DEBUG_DEV_NIC_RX )
    1601 printk("\n[%s] thread[%x,%x] exit failure : no socket found => discard segment\n",
    1602 __FUNCTION__, this->process->pid, this->trdid );
     1792    // 6. no attached socket found and no listening socket found => discard segment
     1793
     1794#if DEBUG_DEV_NIC_ERROR
      1795printk("\n[ERROR] in %s : thread[%x,%x] / channel %d / unexpected TCP segment => discard / cycle %d\n",
      1796__FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle );
    16031797#endif
    16041798
     
    16181812
    16191813    thread_t    * this = CURRENT_THREAD;
    1620 
     1814   
    16211815// check thread can yield
    16221816thread_assert_can_yield( this , __FUNCTION__ );
     
    16261820"illegal chdev type or direction" );
    16271821
    1628 #if DEBUG_DEV_NIC_RX
    1629 uint32_t   cycle = (uint32_t)hal_get_cycles();
     1822#if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR
     1823uint32_t cycle = (uint32_t)hal_get_cycles();
     1824#endif
     1825
     1826#if DEBUG_DEV_NIC_RX
    16301827if( cycle > DEBUG_DEV_NIC_RX )
    16311828printk("\n[%s] thread[%x,%x] starts / cycle %d\n",
    16321829__FUNCTION__, this->process->pid, this->trdid, cycle );
    16331830#endif
     1831
     1832    // avoid warning
     1833    ip_length = 0;
     1834    error     = 0;
    16341835
    16351836    // get extended pointers on server tread and chdev
     
    16741875__FUNCTION__, this->process->pid, this->trdid, cycle );
    16751876#endif
    1676 
     1877            // check possible error reported by NIC ISR
     1878            if( this->nic_cmd.error ) 
     1879            {
     1880                printk("\n[PANIC] in %s : %s DMA engine cannot access RX_QUEUE / cycle %d\n",
     1881                __FUNCTION__, chdev->name , (uint32_t)hal_get_cycles() );
     1882            }
    16771883        }
    16781884        else                 // success => handle packet
     
    16821888cycle = (uint32_t)hal_get_cycles();
    16831889if( DEBUG_DEV_NIC_RX < cycle )
    1684 dev_nic_packet_display( false,               // is_tx
    1685                         this->process->pid,
    1686                         this->trdid,
    1687                         cycle,
    1688                         k_buf );
    16891890#endif
    16901891
     
    16971898            {
    16981899
    1699 #if DEBUG_DEV_NIC_RX
    1700 cycle = (uint32_t)hal_get_cycles();
    1701 if( DEBUG_DEV_NIC_RX < cycle )
    1702 printk("\n[%s] thread[%x,%x] discard ETH packet / cycle %d\n",
     1900#if DEBUG_DEV_NIC_ERROR
     1901printk("\n[WARNING] in %s : thread[%x,%x] discard ETH packet / cycle %d\n",
    17031902__FUNCTION__, this->process->pid, this->trdid, cycle );
    17041903#endif
     
    17231922            {
    17241923
    1725 #if DEBUG_DEV_NIC_RX
    1726 cycle = (uint32_t)hal_get_cycles();
    1727 if( DEBUG_DEV_NIC_RX < cycle )
    1728 printk("\n[%s] thread[%x,%x] discarded IP packet / cycle %d\n",
     1924#if DEBUG_DEV_NIC_ERROR
     1925printk("\n[WARNING] in %s : thread[%x,%x] discard IP packet / cycle %d\n",
    17291926__FUNCTION__, this->process->pid, this->trdid, cycle );
    17301927#endif
     
    17621959            {
    17631960 
    1764 #if DEBUG_DEV_NIC_RX
     1961#if DEBUG_DEV_NIC_ERROR
    17651962cycle = (uint32_t)hal_get_cycles();
    17661963if( DEBUG_DEV_NIC_RX < cycle )
    1767 printk("\n[%s] thread[%x,%x] discarded unsupported transport protocol %d\n",
     1964printk("\n[WARNING] in %s : thread[%x,%x] unsupported transport protocol %d / cycle %d\n",
    17681965__FUNCTION__, this->process->pid, this->trdid, trsp_protocol, cycle );
    17691966#endif
    17701967                continue;
    17711968            }
    1772         }
    1773     } // end of while loop
    1774 // end dev_nic_rx_server()
     1969        }   // end else success
     1970    }   // end of while loop
     1971 // end dev_nic_rx_server()
    17751972
    17761973
     
    17821979
    17831980///////////////////////////////////////////////////////////////////////////////////////////
    1784 // This static function is called by the dev_nic_tx_build_packet() function.
     1981// This static function is called by the dev_nic_tx_send_packet() function.
    17851982// It moves one ETH/IP/UDP packet from the kernel buffer identified by the <buffer> and
     17861983// <length> arguments to the NIC_TX_QUEUE identified by the <chdev> argument.
     
    18162013    this->nic_cmd.buffer = k_buf;
    18172014    this->nic_cmd.length = length;
     2015    this->nic_cmd.error  = 0;
    18182016
    18192017    while( 1 )
     
    18682066// <socket_xp> argument. The <length> argument defines the number of bytes in payload.
    18692067// It set the "src_port", "dst_port", "total_length" and "checksum" fields in UDP header.
    1870 // The payload must be previouly loaded in the pernel buffer.
      2068// The payload must be previously loaded in the kernel buffer.
    18712069///////////////////////////////////////////////////////////////////////////////////////////
    18722070// @ k_buf      : [in]  pointer on first byte of UDP header in kernel buffer.
     
    19042102    k_buf[3] = remote_port;
    19052103
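    // Reminder of the RFC 768 UDP header layout behind the byte offsets used here :
    // k_buf[0..1] = source port / k_buf[2..3] = destination port
    // k_buf[4..5] = length (header + payload) / k_buf[6..7] = checksum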
     2104    // reset checksum
     2105    k_buf[6] = 0;
     2106    k_buf[7] = 0;
     2107
    19062108    // set packet length in header
    19072109    k_buf[4] = total_length >> 8;
     
    19092111   
    19102112    // compute UDP packet checksum
    1911     checksum = dev_nic_udp_checksum( k_buf , total_length );
    1912 
     2113    checksum = dev_nic_tcp_udp_checksum( k_buf,
     2114                                         total_length,
     2115                                         local_addr,
     2116                                         remote_addr,
     2117                                         false );       // is_not_tcp
    19132118    // set checksum
    19142119    k_buf[6] = checksum >> 8;
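For reference, the checksum written here is the standard Internet checksum of RFC 768 / RFC 793, computed over a pseudo-header (source address, destination address, protocol number, transport length) followed by the UDP or TCP header and payload. The sketch below only illustrates what dev_nic_tcp_udp_checksum() is assumed to compute; the real implementation is not shown in this diff and may differ, and the helper name used here is hypothetical.

    // Minimal sketch (assumption, for illustration only) of a pseudo-header checksum.
    static uint16_t pseudo_header_checksum( uint8_t  * buf,       // transport header + payload
                                            uint32_t   length,    // number of bytes in buf
                                            uint32_t   src_addr,  // source IPv4 address
                                            uint32_t   dst_addr,  // destination IPv4 address
                                            bool_t     is_tcp )   // protocol selector
    {
        uint32_t sum = 0;
        uint32_t i;

        // pseudo-header : source and destination addresses as 16-bit words
        sum += (src_addr >> 16) & 0xFFFF;
        sum +=  src_addr        & 0xFFFF;
        sum += (dst_addr >> 16) & 0xFFFF;
        sum +=  dst_addr        & 0xFFFF;

        // pseudo-header : protocol number (6 for TCP / 17 for UDP) and transport length
        sum += is_tcp ? 6 : 17;
        sum += length & 0xFFFF;

        // transport header and payload, taken as big-endian 16-bit words
        for( i = 0 ; (i + 1) < length ; i += 2 ) sum += ((uint32_t)buf[i] << 8) | buf[i+1];

        // an odd trailing byte is padded with zero
        if( length & 1 ) sum += ((uint32_t)buf[length - 1] << 8);

        // fold the carries and return the one's complement
        while( sum >> 16 ) sum = (sum & 0xFFFF) + (sum >> 16);
        return (uint16_t)(~sum);
    }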
     
    19202125// This static function is called by the dev_nic_tx_server() function.
    19212126// It builds a TCP header in the kernel buffer defined by the <k_buf> argument.
    1922 // The payload must have been previouly registered in this buffer.
      2127// The payload must have been previously registered in this buffer (for checksum).
    19232128// The "local_addr", "local_port", "remote_addr", "remote_port", seq_num", "ack_num",
    19242129// and "window" fields are obtained from the <socket_xp> argument.
     
    19992204 
    20002205    // compute TCP segment checksum
    2001     checksum = dev_nic_tcp_checksum( k_buf,
    2002                                      total_length,
    2003                                      src_addr,
    2004                                      dst_addr );
     2206    checksum = dev_nic_tcp_udp_checksum( k_buf,
     2207                                         total_length,
     2208                                         src_addr,
     2209                                         dst_addr,
     2210                                         true );       // is_tcp
    20052211    // set "checksum"
    20062212    k_buf[16] = checksum >> 8;
     
    21082314}  // end dev_nic_tx_build_eth_header()
    21092315
    2110 ///////////////////////////////////////////////////////////////////////////////////////////
    2111 // This static function is called by the dev_nic_tx_server() function to handle one TX
    2112 // command, or one R2T request, as defined by the <cmd_valid> and <r2t_valid> arguments,
    2113 // for the socket identified by the <socket_xp> argument. It builds an ETH/IP/UDP packet
    2114 // or ETH/IP/TCP segment, in the buffer defined by the <k_buf> argument, and registers
    2115 // it in the NIC_TX queue defined by the <chdev> argument.
    2116 // For a TCP header, the "seq_num", ack_num", and "window" fiels are defined by the
    2117 // "socket.tx_next", "socket.rx_next" and "socket.rx_wnd" fields respectively.
    2118 // It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", and "socket.crqq"
    2119 // The supported TX command types are CONNECT / ACCEPT / SEND / CLOSE.
    2120 // fields as required by the command type.
    2121 // - For an UDP socket, it reset the "socket.tx_valid" field, and unblock the client
    2122 //   thread when the packet has been sent, or when an error must be reported.
    2123 // - For a TCP socket, it reset the "socket.tx_valid" field when the segment has been
    2124 //   sent, but does not unblocks the client thread, that will be unblocqued by the
    2125 //   NIC_RX thread when the TX command is fully completed.
     2316
     2317///////////////////////////////////////////////////////////////////////////////////////////
      2318// This static function implements the TCP protocol as specified by the RFC.
     2319// It is called by the dev_nic_tx_server() function to handle one TX command,
     2320// or one R2T request, for the socket identified by the <socket_xp> argument.
      2321// It builds an ETH/IP/UDP packet or ETH/IP/TCP segment in the 2 Kbytes kernel buffer
      2322// defined by the <k_buf> argument, from information found in the socket descriptor.
     2323// It returns a command status code (defined in the ksocket.h file), and returns in the
     2324// <total_length> argument the actual packet length.
     2325// It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", "socket.crqq",
     2326// "socket.todo" fields as required by the command type, but it does NOT reset
     2327// the "socket.tx_valid" field and does NOT unblock the client thread.
      2328// It does NOT take the socket lock, which is taken by the dev_nic_tx_server() function.
    21262329///////////////////////////////////////////////////////////////////////////////////////////
     21272330// To build a packet, it performs the following actions:
    2128 // 1) it takes the lock protecting the socket state.
    2129 // 2) it get the command arguments from socket descriptor.
    2130 // 3) it build an UDP packet or a TCP segment, depending on command type and socket state.
    2131 // 4) it updates the socket state.
    2132 // 5) it releases the lock protecting the socket.
    2133 // 6) it build the IP header.
    2134 // 7) it build the ETH header.
    2135 // 8) it copies the packet in the NIC_TX queue.
    2136 ///////////////////////////////////////////////////////////////////////////////////////////
    2137 // @ cmd_state   : [in] TX command valid in socket descriptor.
    2138 // @ r2t_valid   : [in] R2T request valid in command descriptor.
    2139 // @ socket_xp   : [in] extended pointer on client socket. 
    2140 // @ k_buf       : [in] local pointer on kernel buffer (2 Kbytes).
    2141 // @ chdev       : [in] local pointer on NIC_RX chdev.
    2142 ///////////////////////////////////////////////////////////////////////////////////////////
    2143 static void dev_nic_tx_build_packet( bool_t    cmd_valid,
    2144                                      bool_t    r2t_valid,
    2145                                      xptr_t    socket_xp,
    2146                                      uint8_t * k_buf,
    2147                                      chdev_t * chdev )
      2331// 1) it gets the command arguments from the socket descriptor.
      2332// 2) it builds an UDP packet or a TCP segment, and updates the socket state.
      2333// 3) it builds the IP header.
      2334// 4) it builds the ETH header.
     2335///////////////////////////////////////////////////////////////////////////////////////////
     2336// @ socket_xp    : [in]  extended pointer on client socket. 
     2337// @ k_buf        : [in]  local pointer on kernel buffer (2 Kbytes).
     2338// @ total_length : [out] total number of bytes written in k_buf.
     2339// @ return command status.
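// Illustrative caller pattern (an assumption, not the actual dev_nic_tx_server() code),
// shown only to clarify the contract above : the server, not this function, resets
// "tx_valid" and unblocks the client thread when an error status is returned :
//     uint32_t          length;
//     socket_cmd_sts_t  sts = dev_nic_tx_build_packet( socket_xp , k_buf , &length );
//     if( sts != CMD_STS_SUCCESS )
//     {
//         hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false );
//         dev_nic_unblock_tx_client( socket_xp , sts );
//     }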
     2340///////////////////////////////////////////////////////////////////////////////////////////
     2341static socket_cmd_sts_t dev_nic_tx_build_packet( xptr_t     socket_xp,
     2342                                                 uint8_t  * k_buf,
     2343                                                 uint32_t * total_length )
    21482344{
    21492345    socket_t  * socket_ptr;
    21502346    cxy_t       socket_cxy;
    21512347    xptr_t      client_xp;       // extended pointer on client thread 
     2348    bool_t      cmd_valid;       // valid user command
     2349    bool_t      r2t_valid;       // valid R2T queue request
    21522350    uint32_t    cmd_type;        // NIC command type
    2153     uint8_t   * tx_buf;          // local pointer on kernel buffer for payload
     2351    uint8_t   * tx_buf;          // local pointer on socket buffer for payload
    21542352    uint32_t    len;             // tx_buf length (bytes)
    21552353    uint32_t    todo;            // number of bytes not yet sent
    21562354    uint32_t    socket_type;     // socket type (UDP/TCP)
    21572355    uint32_t    socket_state;    // socket state       
    2158     xptr_t      socket_lock_xp;  // extended pointer on socket lock
    21592356    xptr_t      socket_r2tq_xp;  // extended pointer on R2T queue
    21602357    uint32_t    src_ip_addr;     // source IP address
     
    21662363    uint8_t     trsp_protocol;   // transport protocol type (UDP/TCP)
    21672364    uint8_t     r2t_flags;       // flags defined by one R2T queue request
    2168     bool_t      do_send;         // build & send a packet when true
    2169  
     2365 
    21702366    // get socket cluster and local pointer
    21712367    socket_cxy = GET_CXY( socket_xp );
    21722368    socket_ptr = GET_PTR( socket_xp );
    21732369
     2370#if DEBUG_DEV_NIC_TX  || DEBUG_DEV_NIC_ERROR
     2371uint32_t   cycle       = (uint32_t)hal_get_cycles();
     2372uint32_t   socket_fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));
     2373uint32_t   socket_pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));
     2374#endif
     2375
     2376    // build extended pointer on socket r2t queue
     2377    socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq );
     2378
      2379    // get cmd_valid & r2t_valid from socket descriptor
     2380    cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid ));
     2381    r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq ));
     2382
    21742383#if DEBUG_DEV_NIC_TX
    2175 thread_t * this = CURRENT_THREAD;;
    2176 uint32_t   cycle = (uint32_t)hal_get_cycles();
    2177 uint32_t   fdid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));
    2178 uint32_t   pid   = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));
    21792384if( cycle > DEBUG_DEV_NIC_TX )
    2180 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] : cmd_valid %d / r2t_valid %d / cycle %d\n",
    2181 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle );
    2182 #endif
    2183 
    2184     // build extended pointers on socket lock and r2t queue
    2185     socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock );   
    2186     socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq );
    2187 
    2188     // 1. take lock protecting this socket
    2189     remote_queuelock_acquire( socket_lock_xp );
    2190 
    2191     // get relevant socket infos
     2385printk("\n[%s] enter for socket[%x,%d] : cmd_val %d / r2t_val %d / cycle %d\n",
     2386__FUNCTION__, socket_pid, socket_fdid, cmd_valid, r2t_valid, cycle );
     2387#endif
     2388
     2389    // 1. get relevant socket infos
    21922390    socket_type  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type ));
    21932391    socket_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state ));
     
    21952393    dst_ip_addr  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->remote_addr ));
    21962394
    2197     // compute UDP/TCP packet base in kernel buffer
     2395    // compute UDP/TCP packet base in local kernel buffer
    21982396    k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN;
    21992397
    2200     // set default values
    2201     do_send     = false;
     2398    // default value
    22022399    trsp_length = 0;
    2203     nbytes      = 0;
    2204 
    2205     if( cmd_valid )  // handle TX command
    2206     {
    2207         // 2. get command arguments from socket
     2400
     2401    if( cmd_valid )  // handle TX command depending on type
     2402    {
     2403        // get command arguments from socket
    22082404        cmd_type  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd ));
    22092405        tx_buf    = hal_remote_lpt( XPTR( socket_cxy , &socket_ptr->tx_buf ));
     
    22132409       
    22142410#if DEBUG_DEV_NIC_TX
    2215 cycle = (uint32_t)hal_get_cycles();
    22162411if( cycle > DEBUG_DEV_NIC_TX )
    2217 printk("\n[%s] thread[%x,%x] cmd_valid for socket[%x,%d] : %s / %s / cycle %d\n",
    2218 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2219 socket_cmd_type_str(cmd_type), socket_state_str(socket_state), cycle );
      2412printk("\n[%s] socket[%x,%d] / command %s / state %s\n",
      2413__FUNCTION__, socket_pid, socket_fdid,
      2414socket_cmd_type_str(cmd_type), socket_state_str(socket_state) );
    22202415#endif
    22212416
    22222417        //////////////////////////////////////////////////////////
    2223         // 3. UDP : build UDP packet and update UDP socket state
     2418        // 2. UDP : build UDP packet and update UDP socket state
    22242419        if( socket_type == SOCK_DGRAM )       
    22252420        {
     
    22282423            if( socket_state != UDP_STATE_ESTAB )
    22292424            {
    2230                 // reset tx_valid
    2231                 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false );
    2232 
    2233                 // unblock client thread / report error
    2234                 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE );
     2425                return  CMD_STS_BADSTATE;
    22352426            }
    2236             else
     2427            else if( cmd_type == CMD_TX_SEND )
    22372428            {
    2238                 if( cmd_type == CMD_TX_SEND )
    2239                 {
    2240                     // compute payload length
    2241                     nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo;
    2242 
    2243                     // move payload from tx_buf to 2 Kbytes kernel buffer
    2244                     memcpy( k_trsp_base + UDP_HEAD_LEN,
    2245                             tx_buf + (len - todo),
    2246                             nbytes );
    2247 
    2248                     // build UDP header
    2249                     dev_nic_tx_build_udp_header( k_trsp_base,
    2250                                                  socket_xp,
    2251                                                  nbytes );
    2252 
    2253                     // update "tx_todo" in socket descriptor
    2254                     hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes );
    2255 
    2256                     // send UDP packet
    2257                     trsp_length = UDP_HEAD_LEN + nbytes;
    2258                     do_send     = true;
    2259 
    2260 #if( DEBUG_DEV_NIC_TX & 1)
    2261 cycle = (uint32_t)hal_get_cycles();
     2429                // compute payload length
     2430                nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ? CONFIG_SOCK_PAYLOAD_MAX : todo;
     2431
     2432                // move payload from remote socket tx_buf to local kernel buffer
     2433                hal_remote_memcpy( XPTR( local_cxy  , k_trsp_base + UDP_HEAD_LEN ),
     2434                                   XPTR( socket_cxy , tx_buf + (len - todo) ),
     2435                                   nbytes );
     2436
     2437                // build UDP header
     2438                dev_nic_tx_build_udp_header( k_trsp_base,
     2439                                             socket_xp,
     2440                                             nbytes );
     2441
     2442                // update "tx_todo" in socket descriptor
     2443                hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes );
     2444
     2445                // set UDP packet length
     2446                trsp_length = UDP_HEAD_LEN + nbytes;
     2447
     2448#if DEBUG_DEV_NIC_TX
    22622449if( cycle > DEBUG_DEV_NIC_TX )
    2263 printk("\n[%s] thread[%x,%x] socket[%x,%d] UDP packet build / length %d / cycle %d\n",
    2264 __FUNCTION__, this->process->pid, this->trdid, trsp_length , cycle );
    2265 #endif
    2266                     if( nbytes == todo )    // last byte sent
    2267                     {
    2268                         // reset tx_valid
    2269                         hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false );
    2270 
    2271                         // report success to TX client
    2272                         dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
    2273                     }
    2274                 }
    2275                 else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP
    2276                 {
    2277                     // reset tx_valid
    2278                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false );
    2279 
    2280                     // report error
    2281                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD );
    2282                 }
     2450printk("\n[%s] socket[%x,%d] UDP packet build / %d bytes\n",
     2451__FUNCTION__, socket_pid, socket_fdid, nbytes );
     2452#endif
     2453            }
     2454            else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP
     2455            {
     2456
     2457#if DEBUG_DEV_NIC_ERROR
      2458printk("\n[ERROR] in %s : illegal command %s for UDP socket[%x,%x] / cycle %d\n",
      2459__FUNCTION__, socket_cmd_type_str(cmd_type), socket_pid, socket_fdid, cycle );
     2460#endif
     2461                return  CMD_STS_BADCMD;
    22832462            }
    22842463        }  // end UDP
    22852464
    22862465        ///////////////////////////////////////////////////////////
    2287         // 3. TCP : build TCP segment and update TCP socket state
     2466        // 2. TCP : build TCP segment and update TCP socket state
    22882467        else if( socket_type == SOCK_STREAM )
    22892468        {
     
    22962475                socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq );
    22972476       
    2298                 // get one request from R2T queue 
    2299                 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1 );
     2477                // get one request from R2T queue, and update R2T queue
     2478                socket_get_r2t_request( socket_r2tq_xp , &r2t_flags );
    23002479            }
    23012480            else
     
    23112490                {
    23122491                    // initialises socket tx_nxt, and rx_wnd
    2313                     hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_CLIENT );
    2314                     hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), TCP_MAX_WINDOW );
     2492                    hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt),
     2493                                   CONFIG_SOCK_ISS_CLIENT );
     2494                    hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd),
     2495                                   CONFIG_SOCK_MAX_WINDOW );
    23152496
    23162497                    // build TCP SYN segment
     
    23192500                                                 0,        // length
    23202501                                                 TCP_FLAG_SYN );   
    2321                     // send segment
     2502                    // set TCP packet length
    23222503                    trsp_length = TCP_HEAD_LEN;
    2323                     do_send     = true;
    2324 
    2325 #if DEBUG_DEV_NIC_TX
    2326 cycle = (uint32_t)hal_get_cycles();
    2327 if( cycle > DEBUG_DEV_NIC_TX )
    2328 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CONNECT / "
    2329 "TCP SYN build / cycle %d\n",
    2330 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2331 socket_state_str( socket_state ), cycle );
    2332 #endif
     2504
    23332505                    // update socket.state
    23342506                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
     
    23372509                    // update socket.tx_nxt
    23382510                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ),
    2339                                           TCP_ISS_CLIENT + 1 );
    2340 
    2341                     // reset tx_valid but do not unblock client thread
    2342                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
     2511                                          CONFIG_SOCK_ISS_CLIENT + 1 );
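                    // (the SYN flag consumes one sequence number, hence the "+ 1" above)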
     2512#if DEBUG_DEV_NIC_TX
     2513if( cycle > DEBUG_DEV_NIC_TX )
     2514printk("\n[%s] socket[%x,%d] %s / CONNECT / TCP SYN build\n",
     2515__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) );
     2516#endif
    23432517                }
    23442518                else                      // report error for all other socket states
    23452519                {
    2346                     // reset tx_valid
    2347                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false );
    2348                    
    2349                     // report error
    2350                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE );
     2520
     2521#if DEBUG_DEV_NIC_ERROR
      2522printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n",
     2523__FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle );
     2524#endif
     2525                    return CMD_STS_BADSTATE;
    23512526                }
    23522527            }
     
    23582533                {
    23592534                    // initialize socket tx_nxt, and rx_wnd
    2360                     hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER );
    2361                     hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), CONFIG_SOCK_RX_BUF_SIZE);
     2535                    hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt),
     2536                                        CONFIG_SOCK_ISS_SERVER );
     2537                    hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd),
     2538                                        (1 << CONFIG_SOCK_RX_BUF_ORDER) );
    23622539               
    23632540                    // build TCP ACK-SYN segment
     
    23662543                                                 0,         //  length
    23672544                                                 TCP_FLAG_SYN | TCP_FLAG_ACK );
    2368                     // send segment
     2545                    // set TCP packet length
    23692546                    trsp_length = TCP_HEAD_LEN;
    2370                     do_send     = true;
    2371 
    2372 #if DEBUG_DEV_NIC_TX
    2373 cycle = (uint32_t)hal_get_cycles();
    2374 if( cycle > DEBUG_DEV_NIC_TX )
    2375 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / ACCEPT / send SYN-ACK / cycle %d\n",
    2376 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2377 socket_state_str( socket_state ), cycle );
    2378 #endif
     2547
    23792548                    // update socket.state
    23802549                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),
     
    23832552                    // update socket.tx_nxt
    23842553                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ),
    2385                                           TCP_ISS_SERVER + 1 );
    2386 
    2387                     // reset tx_valid but do not unblock client thread
    2388                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
     2554                                          CONFIG_SOCK_ISS_SERVER + 1 );
     2555#if DEBUG_DEV_NIC_TX
     2556if( cycle > DEBUG_DEV_NIC_TX )
     2557printk("\n[%s] socket[%x,%d] %s / ACCEPT / SYN-ACK build\n",
     2558__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) );
     2559#endif
    23892560                }
    23902561                else                     // report error in all other socket states
    23912562                {
    2392                     // reset tx_valid
    2393                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
    2394 
    2395                     // report error to TX client thread
    2396                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE );
     2563
     2564#if DEBUG_DEV_NIC_ERROR
     2565printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n",
     2566__FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle );
     2567#endif
     2568                    return CMD_STS_BADSTATE;
    23972569                }
    23982570            }
     
    24232595                    hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + 1 );
    24242596
    2425                     // send segment
     2597                    // set TCP packet length
    24262598                    trsp_length = TCP_HEAD_LEN;
    2427                     do_send     = true;
    24282599
    24292600#if DEBUG_DEV_NIC_TX
    2430 cycle = (uint32_t)hal_get_cycles();
    24312601if( cycle > DEBUG_DEV_NIC_TX )
    2432 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / send FIN-ACK / cycle %d\n",
    2433 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2434 socket_state_str( socket_state ), cycle );
    2435 #endif
    2436                     // reset tx_valid but do not unblock client thread
    2437                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
     2602printk("\n[%s] socket[%x,%d] %s / CLOSE / FIN-ACK build\n",
     2603__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) );
     2604#endif
    24382605                }
    24392606                else                                 // all other states => signal error
    24402607                {
    24412608
    2442 #if DEBUG_DEV_NIC_TX
    2443 cycle = (uint32_t)hal_get_cycles();
    2444 if( cycle > DEBUG_DEV_NIC_TX )
    2445 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / error BADSTATE / cycle %d\n",
    2446 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2447 socket_state_str( socket_state ), cycle );
    2448 #endif
    2449                     // reset tx_valid
    2450                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
    2451 
    2452                     // report error
    2453                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE );
     2609#if DEBUG_DEV_NIC_ERROR
     2610printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n",
     2611__FUNCTION__, socket_state_str(socket_state),  socket_pid, socket_fdid, cycle );
     2612#endif
     2613                    return CMD_STS_BADSTATE;
    24542614                }
    24552615            }
    2456             /////////////////////////////////////
     2616            //////////////////////////////////
    24572617            else if( cmd_type == CMD_TX_SEND )
    24582618            {
     
    24642624
    24652625                    // compute actual payload length
    2466                     nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo;
    2467 
    2468                     // compute TCP segment base in kernel buffer
    2469                     k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN;
    2470 
    2471                     // move payload to k_buf
    2472                     memcpy( k_trsp_base + TCP_HEAD_LEN,
    2473                             tx_buf + (len - todo),
    2474                             nbytes );
     2626                    nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ?
     2627                             CONFIG_SOCK_PAYLOAD_MAX : todo;
     2628
     2629                    // move payload from remote tx_buf to local kernel buffer
     2630                    hal_remote_memcpy( XPTR( local_cxy  , k_trsp_base + TCP_HEAD_LEN ),
     2631                                       XPTR( socket_cxy , tx_buf + (len - todo) ),
     2632                                       nbytes );
    24752633
    24762634                    // build TCP header
     
    24862644                    hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + nbytes );
    24872645
    2488                     // send TCP segment
     2646                    // set TCP packet length
    24892647                    trsp_length = TCP_HEAD_LEN + nbytes;
    2490                     do_send     = true;
    2491 
    2492                     if( todo == nbytes )   // last byte sent
    2493                     {
    2494                         // reset tx_valid when last byte has been sent
    2495                         hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_valid), false );
    2496                     }
    24972648
    24982649#if DEBUG_DEV_NIC_TX
    2499 cycle = (uint32_t)hal_get_cycles();
    25002650if( cycle > DEBUG_DEV_NIC_TX )
    2501 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / SEND / "
    2502 "TCP DATA build / payload %d / cycle %d\n",
    2503 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,
    2504 socket_state_str( socket_state ), nbytes, cycle );
     2651printk("\n[%s] socket[%x,%d] %s / SEND / %d bytes\n",
     2652__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), nbytes );
    25052653#endif
    25062654                }
    25072655                else  // all other socket states
    25082656                {
    2509                     // reset tx_valid
    2510                     hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
    2511 
    2512                     // report error to TX client thread
    2513                     dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE );
     2657
     2658#if DEBUG_DEV_NIC_ERROR
     2659printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n",
     2660__FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle );
     2661#endif
     2662                    return CMD_STS_BADSTATE;
    25142663                }
    25152664            }
     
    25172666            else  // undefined TX command type
    25182667            {
    2519                 // reset tx_valid
    2520                 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false );
    2521 
    2522                 // report error to TX client thread
    2523                 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD );
     2668
     2669#if DEBUG_DEV_NIC_ERROR
     2670printk("\n[ERROR] in %s : undefined command type for socket[%x,%x] %s / cycle %d\n",
     2671__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), cycle );
     2672#endif
     2673                return CMD_STS_BADCMD;
    25242674            }
    25252675        }  // end TCP
     
    25272677    else         // no valid TX command => handle R2T request only
    25282678    {
     2679
     2680assert( __FUNCTION__ , (socket_type == SOCK_STREAM) , "don't use R2T queue for UDP" );
     2681 
    25292682        // get one request from R2T queue
    2530         remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1 );
     2683        socket_get_r2t_request( socket_r2tq_xp , &r2t_flags );
    25312684
    25322685#if DEBUG_DEV_NIC_TX
    25332686cycle = (uint32_t)hal_get_cycles();
    25342687if( cycle > DEBUG_DEV_NIC_TX )
    2535 printk("\n[%s] thread[%x,%x] only r2t_valid for socket[%x,%d] / flags %x / cycle %d\n",
    2536 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, r2t_flags, cycle );
    2537 #endif
    2538 
     2688printk("\n[%s] socket[%x,%d] %s / send only flags %x / no data\n",
     2689__FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), r2t_flags );
     2690#endif
    25392691        // build TCP header
    25402692        dev_nic_tx_build_tcp_header( k_trsp_base,
    25412693                                     socket_xp,
    2542                                      0,             // payload length
     2694                                     0,             // no payload
    25432695                                     r2t_flags );   // flags
    2544         // send TCP segment
     2696        // set protocol
    25452697        trsp_protocol = PROTOCOL_TCP;
     2698
     2699        // set TCP packet length
    25462700        trsp_length   = TCP_HEAD_LEN;
    2547         do_send       = true;
    25482701    }
    25492702
    2550     // 4. release the lock protecting the socket
    2551     remote_queuelock_release( socket_lock_xp );
    2552 
    2553     // return if no packet to send
    2554     if( do_send == false ) return;
    2555 
    2556     // 5. build IP header
     2703    // 3. build IP header
    25572704    dev_nic_tx_build_ip_header( k_buf + ETH_HEAD_LEN,
    25582705                                src_ip_addr,
     
    25612708                                trsp_length );
    25622709
    2563 #if( DEBUG_DEV_NIC_TX & 1)
    2564 cycle = (uint32_t)hal_get_cycles();
    2565 if( cycle > DEBUG_DEV_NIC_TX )
    2566 printk("\n[%s] thread[%x,%x] IP header build / length %d / cycle %d\n",
    2567 __FUNCTION__, this->process->pid, this->trdid, IP_HEAD_LEN + trsp_length , cycle );
    2568 #endif
    2569 
    2570     // 6. build ETH header
     2710    // 4. build ETH header
    25712711    dev_nic_tx_build_eth_header( k_buf,
    25722712                                 (uint8_t)DST_MAC_5,
     
    25842724                                 IP_HEAD_LEN + trsp_length );
    25852725
    2586 #if( DEBUG_DEV_NIC_TX & 1)
    2587 cycle = (uint32_t)hal_get_cycles();
    2588 if( cycle > DEBUG_DEV_NIC_TX )
    2589 printk("\n[%s] thread[%x,%x] ETH header build / cycle %d\n",
    2590 __FUNCTION__, this->process->pid, this->trdid, cycle );
    2591 #endif
    2592 
    2593     // 7. move packet to NIC_TX queue (blocking function)
    2594     dev_nic_tx_move_packet( chdev,
    2595                             k_buf,
    2596                             ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length );
    2597 
    25982726#if DEBUG_DEV_NIC_TX
    25992727cycle = (uint32_t)hal_get_cycles();
    26002728if( cycle > DEBUG_DEV_NIC_TX )
    2601 printk("\n[%s] thread[%x,%x] for socket[%x,%d] moved packet to NIC_TX / cycle %d\n",
    2602 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cycle );
    2603 #endif
     2729printk("\n[%s] exit for socket[%x,%d] / packet build / cycle %d\n",
     2730__FUNCTION__, socket_pid, socket_fdid, cycle );
     2731#endif
     2732
     2733    // return success and total packet length
     2734    *total_length = ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length;
     2735    return CMD_STS_SUCCESS;
    26042736
    26052737}  // end dev_nic_tx_build_packet()
    2606 
    26072738
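The per-command branches above all reduce to choosing the TCP flags and the transport length before the IP and ETH headers are prepended, and the function finally returns total_length = ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length. The stand-alone sketch below summarizes that mapping. It assumes the classical 14/20/20-byte ETH/IP/TCP headers; the CMD_TX_CONNECT / CMD_TX_ACCEPT / CMD_TX_CLOSE names and the ACK flag on data segments are illustrative assumptions (only CMD_TX_SEND and the SYN / SYN-ACK / FIN-ACK flag choices are visible in this changeset).

/* Illustrative sketch only: maps a TX command to the TCP flags and the total
 * ETH/IP/TCP frame length, in the spirit of dev_nic_tx_build_packet().
 * Header sizes are assumed (14/20/20); command names and the ACK flag on
 * data segments are hypothetical, not the kernel definitions.              */

#include <stdint.h>
#include <stdio.h>

#define ETH_HEAD_LEN   14                 /* assumed Ethernet header size     */
#define IP_HEAD_LEN    20                 /* assumed IPv4 header (no options) */
#define TCP_HEAD_LEN   20                 /* assumed TCP header (no options)  */

#define TCP_FLAG_FIN   0x01
#define TCP_FLAG_SYN   0x02
#define TCP_FLAG_ACK   0x10

typedef enum { CMD_TX_CONNECT, CMD_TX_ACCEPT, CMD_TX_CLOSE, CMD_TX_SEND } tx_cmd_t;

/* returns the total frame length and reports the TCP flags to be set */
static uint32_t tx_frame_length( tx_cmd_t cmd, uint32_t payload, uint8_t *flags )
{
    uint32_t trsp_length = TCP_HEAD_LEN;

    switch( cmd )
    {
        case CMD_TX_CONNECT: *flags = TCP_FLAG_SYN;                 break;
        case CMD_TX_ACCEPT:  *flags = TCP_FLAG_SYN | TCP_FLAG_ACK;  break;
        case CMD_TX_CLOSE:   *flags = TCP_FLAG_FIN | TCP_FLAG_ACK;  break;
        case CMD_TX_SEND:    *flags = TCP_FLAG_ACK;   /* assumption */
                             trsp_length += payload;                break;
    }
    return ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length;  /* == *total_length */
}

int main( void )
{
    uint8_t  flags;
    uint32_t len = tx_frame_length( CMD_TX_SEND, 1000, &flags );
    printf( "SEND 1000 bytes -> frame %u bytes / flags %#x\n",
            (unsigned)len, (unsigned)flags );
    return 0;
}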
    26082739/////////////////////////////////////////
    26092740void dev_nic_tx_server( chdev_t * chdev )
    26102741{
    2611     uint8_t       k_buf[CONFIG_SOCK_PKT_BUF_SIZE];  // buffer for one packet
    2612 
    2613     xptr_t        queue_root_xp;       // extended pointer on sockets list root
    2614     xptr_t        queue_lock_xp;       // extended pointer on lock protecting this list
    2615     xptr_t        socket_xp;           // extended pointer on on registered socket
    2616     socket_t    * socket_ptr;
    2617     cxy_t         socket_cxy;
    2618     xptr_t        iter_xp;             // iterator for loop on registered sockets
    2619     xlist_entry_t temp_root;           // root of temporary list of sockets
    2620     xptr_t        temp_root_xp;        // extended pointer on temporary list of sockets
    2621     uint32_t      temp_nr;             // number of active registered sockets
    2622     bool_t        cmd_valid;           // TX command valid in socket descriptor
    2623     bool_t        r2t_valid;           // valid R2T request in socket descriptor
    2624 
     2742    uint8_t           k_buf[CONFIG_SOCK_PKT_BUF_SIZE];  // buffer for one packet
     2743
     2744    xptr_t            queue_lock_xp;       // extended pointer on lock for sockets list
     2745    xptr_t            root_xp;             // extended pointer on sockets list root
     2746    xptr_t            iter_xp;             // iterator for loop on sockets list
     2747    xptr_t            list_xp;             // extended pointer on socket tx_list field
     2748    xptr_t            socket_xp;           // extended pointer on found socket
     2749    socket_t        * socket_ptr;          // local pointer on found socket
     2750    cxy_t             socket_cxy;          // found socket cluster identifier
     2751    xptr_t            socket_lock_xp;      // extended pointer on found socket lock
     2752    bool_t            cmd_valid;           // TX command valid in socket descriptor
     2753    bool_t            r2t_valid;           // valid R2T request in socket descriptor
     2754    uint32_t          sock_type;           // socket type
     2755    socket_cmd_sts_t  cmd_sts;             // value returned by dev_nic_tx_build_packet()
     2756    socket_cmd_type_t tx_cmd;              // socket TX command type
     2757    uint32_t          tx_todo;             // socket number of bytes not sent yet
     2758    uint32_t          total_length;        // length of the ETH/IP/TCP packet (bytes)
     2759    bool_t            found;               // one active socket found
     2760   
    26252761    thread_t * this = CURRENT_THREAD;
    26262762
     
    26382774"illegal chdev type or direction" );
    26392775
    2640 // check thread can yield
    2641 thread_assert_can_yield( this , __FUNCTION__ );
    2642 
    2643     // build extended pointer on temporary list
    2644     temp_root_xp = XPTR( local_cxy , &temp_root );
    2645                                                          
    2646     // build extended pointer on client sockets queue (lock & root)
     2776    // build extended pointers on client sockets queue lock
    26472777    queue_lock_xp = XPTR( local_cxy , &chdev->wait_lock );
    2648     queue_root_xp = XPTR( local_cxy , &chdev->wait_root );
     2778
     2779    // build extended pointers on client sockets queue root and first item
     2780    root_xp  = XPTR( local_cxy , &chdev->wait_root );
    26492781
    26502782    while( 1 )  // TX server infinite loop
    26512783    {
    2652         // initialize temporary list of registered sockets as empty
    2653         xlist_root_init( temp_root_xp );
    2654         temp_nr = 0;
    2655 
    26562784        // take the lock protecting the client sockets queue
    26572785        remote_busylock_acquire( queue_lock_xp );
    26582786
    2659         // build temporary list of all registered sockets
    2660         if( xlist_is_empty( queue_root_xp ) == false ) 
     2787        found = false;
     2788
     2789        // scan registered sockets to find one active socket
     2790        // with a round robin priority between the registered sockets
     2791        if( xlist_is_empty( root_xp ) == false ) 
    26612792        {
    2662             XLIST_FOREACH( queue_root_xp , iter_xp )
     2793            XLIST_FOREACH( root_xp , iter_xp )
    26632794            {
    2664                 // get client socket cluster and local pointer
     2795                // get client socket cluster and pointers
    26652796                socket_xp  = XLIST_ELEMENT( iter_xp , socket_t , tx_list );
    26662797                socket_ptr = GET_PTR( socket_xp );
    26672798                socket_cxy = GET_CXY( socket_xp );
    26682799
    2669                 // register socket in temporary list
    2670                 xlist_add_last( temp_root_xp , XPTR( socket_cxy , &socket_ptr->tx_temp ));
    2671                 temp_nr++;
    2672             }
     2800                // build extended pointer on socket tx_list field
     2801                list_xp = XPTR( socket_cxy , &socket_ptr->tx_list );
     2802
     2803                // get cmd_valid from socket descriptor
     2804                cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid ));
     2805
     2806                // get r2t_valid from socket descriptor
     2807                r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq ));
     2808
     2809                if( cmd_valid || r2t_valid )    // active => move socket, and exit loop
     2810                {
     2811                    // move selected socket to last position for round-robin
     2812                    xlist_unlink( list_xp );
     2813                    xlist_add_last( root_xp , list_xp );
     2814
     2815                    // exit loop
     2816                    found = true;
     2817                    break;
     2818                }
     2819            }   // end loop on sockets
    26732820        }
    2674 
     2821       
    26752822        // release the lock protecting the client sockets queue
    26762823        remote_busylock_release( queue_lock_xp );
    26772824
    2678         if( temp_nr > 0 )
    2679         {
    2680             // loop on temporary list
    2681             XLIST_FOREACH( temp_root_xp , iter_xp )
    2682             {
    2683                 // get client socket cluster and local pointer
    2684                 socket_xp  = XLIST_ELEMENT( iter_xp , socket_t , tx_temp );
    2685                 socket_ptr = GET_PTR( socket_xp );
    2686                 socket_cxy = GET_CXY( socket_xp );
    2687 
    2688                 // get cmd_valid & t2t_valid from socket descriptor
    2689                 cmd_valid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid ));
    2690 
    2691                 // get r2t_valid from socket descriptor
    2692                 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq ));
    2693 
    2694                 // test if socket is active
    2695                 if( cmd_valid || r2t_valid )  // active socket
    2696                 {
    2697 
    2698 #if DEBUG_DEV_NIC_TX
    2699 cycle = (uint32_t)hal_get_cycles();
    2700 pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));
    2701 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));
    2702 if( cycle > DEBUG_DEV_NIC_TX )
    2703 printk("\n[%s] thread[%x,%x] found socket[%x,%d] / cmd_valid %d / r2t_valid %d / cycle %d\n",
    2704 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle );
    2705 #endif
    2706                     // build and send one packet/segment for this socket
    2707                     dev_nic_tx_build_packet( cmd_valid,
    2708                                              r2t_valid,
    2709                                              socket_xp,
    2710                                              k_buf,
    2711                                              chdev );
    2712 #if DEBUG_DEV_NIC_TX
    2713 cycle = (uint32_t)hal_get_cycles();
    2714 if( cycle > DEBUG_DEV_NIC_TX )
    2715 dev_nic_packet_display( true,                // is_tx
    2716                         this->process->pid,
    2717                         this->trdid,
    2718                         cycle,
    2719                         k_buf );
    2720 #endif
    2721                 }
    2722                 else                          // inactive socket
    2723                 {
    2724                    temp_nr--;
    2725                 }
    2726             }  // end loop on temporary list
    2727         }
    2728 
    2729         // block & deschedule if no active socket found in current iteration
    2730         if( temp_nr == 0 )
     2825        if( found == false ) // block & deschedule if no active socket
    27312826        {
    27322827 
     
    27372832__FUNCTION__, this->process->pid, this->trdid, cycle );
    27382833#endif
    2739 
    27402834            // block and deschedule
    27412835            thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_CLIENT );
     
    27492843#endif
    27502844        }
     2845        else              // handle active socket request
     2846        {
     2847            // avoid warning
     2848            total_length = 0;
     2849 
     2850            // build extended pointer on socket lock
     2851            socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock );
     2852
     2853            // take socket lock
     2854            remote_queuelock_acquire( socket_lock_xp );
     2855
     2856#if DEBUG_DEV_NIC_TX
     2857cycle = (uint32_t)hal_get_cycles();
     2858pid  = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));
     2859fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));
     2860#endif
     2861
     2862#if DEBUG_DEV_NIC_TX
     2863if( cycle > DEBUG_DEV_NIC_TX )
     2864printk("\n[%s] thread[%x,%x] select socket[%x,%d] / cmd_val %d / r2t_val %d / cycle %d\n",
     2865__FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle );
     2866#endif
     2867            // build one UDP packet / TCP segment
     2868            cmd_sts = dev_nic_tx_build_packet( socket_xp,
     2869                                               k_buf,
     2870                                               &total_length );
     2871#if DEBUG_DEV_NIC_TX
     2872cycle = (uint32_t)hal_get_cycles();
     2873if( cycle > DEBUG_DEV_NIC_TX )
     2874printk("\n[%s] thread[%x,%x] for socket[%x,%x] build packet / %d bytes / sts %d / cycle %d\n",
     2875__FUNCTION__, this->process->pid, this->trdid, pid, fdid, total_length, cmd_sts, cycle );
     2876#endif
     2877            // release socket lock
     2878            remote_queuelock_release( socket_lock_xp );
     2879
     2880            if( cmd_sts == CMD_STS_SUCCESS )    // move packet to TX queue
     2881            {
     2882                // move packet to NIC_TX queue
     2883                dev_nic_tx_move_packet( chdev,
     2884                                        k_buf,
     2885                                        total_length );
     2886#if DEBUG_DEV_NIC_TX
     2887cycle = (uint32_t)hal_get_cycles();
     2888if( cycle > DEBUG_DEV_NIC_TX )
     2889dev_nic_packet_display( pid, fdid, cycle, k_buf );
     2890#endif
     2891                // get socket.type, socket.tx_cmd and socket.tx_todo values
     2892                tx_cmd    = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd ));
     2893                tx_todo   = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_todo ));
     2894                sock_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type ));
     2895
     2896                // client signaling depends on command type and socket type
     2897                if( (tx_cmd == CMD_TX_SEND) &&  (tx_todo == 0) )
     2898                {
     2899                    // reset tx_valid for both UDP and TCP
     2900                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false );
     2901
     2902                    // unblock client thread for UDP only
     2903                    if(sock_type == SOCK_DGRAM)
     2904                    dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS );
     2905                }
     2906                else  // CONNECT / ACCEPT / CLOSE, or SEND not completed
     2907                {
     2908                    // reset tx_valid
     2909                    hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false );
     2910                }
     2911            }
     2912            else                          // signal error to client thread
     2913            {
     2914                // reset tx_valid
     2915                hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false );
     2916
     2917                // unblock tx_client thread
     2918                dev_nic_unblock_tx_client( socket_xp , cmd_sts );
     2919            }
     2920        }   // end active socket handling
    27512921    }   // end infinite while loop
    27522922}  // end dev_nic_tx_server()
    27532923
    27542924
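The selection policy implemented by the scan loop above - take the first socket whose tx_valid or R2T status is set, move it to the tail of the registered sockets queue, then handle it with the socket lock held - gives the other registered sockets priority at the next iteration. The plain-C sketch below illustrates only this move-to-back round-robin idea; it is not the kernel xlist implementation, and the "active" field stands for the (cmd_valid || r2t_valid) test.

/* Sketch of the move-to-back round-robin selection used by the TX server:
 * the first "active" socket found is unlinked and re-inserted at the tail,
 * so the next scan starts with the remaining sockets.  Plain-C stand-in,
 * not the kernel xlist implementation.                                     */

#include <stdbool.h>
#include <stdio.h>

typedef struct sock {
    int           id;
    bool          active;        /* stands for (cmd_valid || r2t_valid) */
    struct sock * next;
} sock_t;

typedef struct { sock_t *head, *tail; } queue_t;

static void enqueue( queue_t *q, sock_t *s )
{
    s->next = NULL;
    if( q->tail ) q->tail->next = s; else q->head = s;
    q->tail = s;
}

/* find the first active socket, move it to the tail, return it (or NULL) */
static sock_t * select_round_robin( queue_t *q )
{
    sock_t *prev = NULL;
    for( sock_t *s = q->head ; s != NULL ; prev = s, s = s->next )
    {
        if( !s->active ) continue;

        if( s != q->tail )                     /* unlink ...               */
        {
            if( prev ) prev->next = s->next; else q->head = s->next;
            s->next = NULL;                    /* ... and re-insert at tail */
            q->tail->next = s;
            q->tail = s;
        }
        return s;
    }
    return NULL;
}

int main( void )
{
    queue_t q = { NULL, NULL };
    sock_t  s[3] = { {0,false,NULL}, {1,true,NULL}, {2,true,NULL} };
    for( int i = 0 ; i < 3 ; i++ ) enqueue( &q, &s[i] );

    for( int i = 0 ; i < 3 ; i++ )             /* sockets 1 and 2 alternate */
    {
        sock_t *sel = select_round_robin( &q );
        printf( "selected socket %d\n", sel ? sel->id : -1 );
    }
    return 0;
}

Running it, sockets 1 and 2 are selected alternately, which is the fairness property the real scan loop aims at.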
    2755 /////////////////////////////////////////////
    2756 void dev_nic_packet_display( bool_t    is_tx,
    2757                              pid_t     thread_pid,
    2758                              trdid_t   thread_trdid,
     2925
     2926
     2927
     2928//////////////////////////////////////////////////
     2929void dev_nic_packet_display( pid_t     socket_pid,
     2930                             uint32_t  socket_fdid,
    27592931                             uint32_t  cycle,
    27602932                             uint8_t * buf )
     
    28152987    remote_busylock_acquire( lock_xp );
    28162988
    2817     if( is_tx )
    2818     {
    2819         nolock_printk("\n*** NIC_TX server thread[%x,%x] send packet / cycle %d\n",
    2820         thread_pid, thread_trdid, cycle );
    2821     }
    2822     else
    2823     {
    2824         nolock_printk("\n*** NIC_RX server thread[%x,%x] get packet / cycle %d\n",
    2825         thread_pid, thread_trdid, cycle );
    2826     }
    2827 
    2828     nolock_printk("\n***** ETH header *****\n");
     2989    nolock_printk("\n***** packet sent by NIC_TX server for socket[%x,%d] / cycle %d\n",
     2990    socket_pid, socket_fdid, cycle );
     2991
     2992    nolock_printk(" ETH header\n");
    28292993    nolock_printk(" - dst_mac  [6] = %l\n" , eth_dst_mac );
    28302994    nolock_printk(" - src_mac  [6] = %l\n" , eth_src_mac );
    28312995    nolock_printk(" - length   [2] = %d\n" , (uint32_t)eth_length );
    2832     nolock_printk("***** IP  header *****\n");
     2996    nolock_printk(" IP  header\n");
    28332997    nolock_printk(" - version  [1] = %x\n" , (uint32_t)ip_version );
    28342998    nolock_printk(" - tos      [1] = %x\n" , (uint32_t)ip_tos );
     
    28503014                                ((uint16_t)buf[37]     ) ;
    28513015
    2852         nolock_printk("***** UDP header *****\n");
     3016        nolock_printk(" UDP header\n");
    28533017        nolock_printk(" - src_port [2] = %d\n" , (uint32_t)udp_src_port );
    28543018        nolock_printk(" - dst_port [2] = %d\n" , (uint32_t)udp_dst_port );
     
    28813045                                 ((uint16_t)buf[53]     ) ;
    28823046
    2883         nolock_printk("***** TCP header *****\n");
     3047        nolock_printk(" TCP header\n");
    28843048        nolock_printk(" - src_port [2] = %x\n" , (uint32_t)tcp_src_port );
    28853049        nolock_printk(" - dst_port [2] = %x\n" , (uint32_t)tcp_dst_port );
  • trunk/kernel/devices/dev_nic.h

    r674 r683  
    9999 *
    100100 * - GET_KEY      : get channel index from remote IP address and port
    101  * - SET_RUN      : activate/desactivate one channel
     101 * - SET_RUN      : activate/deactivate one channel (both directions)
    102102 * - GET_INSTRU   : get one instrumentation counter value
    103103 * - CLEAR_INSTRU : reset all instrumentation counters
     
    140140#define PROTOCOL_TCP           0x06
    141141
    142 #define TCP_ISS_CLIENT         0x10000      // initial sequence number for TCP client
    143 #define TCP_ISS_SERVER         0x20000      // initial sequence number for TCP server
    144 #define TCP_MAX_WINDOW         0xFFFFF      // initial TCP send window
    145 
    146 #define PAYLOAD_MAX_LEN        1500         // max length for an UDP packet / TCP segment
    147 
    148142#define TCP_FLAG_FIN           0x01
    149143#define TCP_FLAG_SYN           0x02
     
    152146#define TCP_FLAG_ACK           0x10
    153147#define TCP_FLAG_URG           0x20
    154 
    155 #define TCP_RETRANSMISSION_TIMEOUT  10000000
    156148
    157149/*****************************************************************************************
     
    192184 *   in the server thread descriptor, to access the NIC_RX & NIC_TX packet queues.
    193185 *   The buffer is always a 2K bytes kernel buffer, containing an Ethernet packet.
    194  * - The next 4 synchronous commands are used by the client th, and stored in the
     186 * - The next 4 synchronous commands are used by the client thread, and stored in the
    195187 *   client thread descriptor, to directly access the NIC registers.
    196188 ****************************************************************************************/
     
    212204    xptr_t      dev_xp;       /*! extended pointer on NIC chdev descriptor              */
    213205    nic_cmd_t   type;         /*! command type                                          */
    214     uint8_t   * buffer;       /*! local pointer on kernel buffer                        */ 
    215     uint32_t    length;       /*! number of bytes in buffer                             */
     206    uint8_t   * buffer;       /*! local pointer on kernel buffer (when READ / WRITE)    */ 
     207    uint32_t    length;       /*! number of bytes in buffer (when READ / WRITE )        */
    216208    uint32_t    status;       /*! return value (depends on command type)                */
    217209    uint32_t    error;        /*! return an error from the hardware (0 if no error)     */
     
    282274 * This TX server thread is created by the dev_nic_init() function.
    283275 * It builds and sends UDP packets or TCP segments for all client threads registered in
    284  * the NIC_TX[channel] chdev. The command types are (CONNECT / SEND / CLOSE), and the
    285  * priority between clients is round-robin. It takes into account the request registered
    286  * by the RX server thread in the R2T queue associated to the involved socket.
    287  * When a command is completed, it unblocks the client thread. For a SEND command, the
    288  * last byte must have been sent for an UDP socket, and it must have been acknowledged
    289  * for a TCP socket.
    290  * When the TX client threads queue is empty, it blocks on THREAD_BLOCKED_CLIENT
    291  * condition and deschedules. It is re-activated by a client thread registering a command.
     276 * the NIC_TX[channel] chdev. The command types are (CONNECT / ACCEPT / CLOSE / SEND).
     277 * It takes into account the requests registered by the RX server thread in the R2T queues.
     278 * The loop on registered sockets implements a round-robin priority between sockets.
     279 * When no registered socket is active, it blocks on the THREAD_BLOCKED_CLIENT condition
     280 * and deschedules. It is re-activated by a client thread registering a command.
    292281 * When the NIC_TX packet queue is full, it blocks on the THREAD_BLOCKED_ISR condition
    293282 * and deschedules. It is reactivated by the NIC_TX DMA engine.
    294283 ******************************************************************************************
    295284 * Implementation note:
    296  * It execute an infinite loop in which it takes the lock protecting the clients list
    297  * to build a "kleenex" list of currently registered clients.
    298  * For each client registered in this "kleenex" list, it takes the lock protecting the
    299  * socket state, build one packet/segment in a local 2K bytes kernel buffer, calls the
    300  * transport layer to add the UDP/TCP header, calls the IP layer to add the IP header,
     285 * At each iteration of the infinite loop, it takes the lock protecting the registered
     286 * client sockets queue, and scans this queue to find one active socket (tx_valid or
     287 * r2t_valid flag set). The first active socket found is moved to the queue tail, to
     288 * implement the round-robin priority, and the scan exits.
     289 * When the scan is completed, it releases the lock protecting the queue, then takes
     290 * the lock protecting the found socket state. This socket lock is released only when
     291 * the requested packet has been built, and the active socket state has been updated.
     292 * To handle a socket request, it calls the transport layer to build the UDP packet or
     293 * TCP segment in a local 2K bytes kernel buffer, calls the IP layer to add the IP header,
    301294 * calls the ETH layer to add the ETH header, and moves the packet to the NIC_TX_QUEUE.
    302  * Finally, it updates the socket state, and release the socket lock.
    303295 ******************************************************************************************
    304296 * @ chdev    : [in] local pointer on one local NIC_TX[channel] chdev descriptor.
     
    331323
    332324/******************************************************************************************
    333  * This function displays all the fields of an ETH/IP/TCP segment or ETH/IP/UDP packet.
    334  ******************************************************************************************
    335  * @ is_tx   : [in] sent packet if true / received packet if false.
     325 * This debug function can be called by the dev_nic_tx_server() function to display
     326 * on TXT0 the headers of a TX [ETH/IP/TCP] segment or [ETH/IP/UDP] packet.
     327 ******************************************************************************************
    336328 * @ pid     : [in] process identifier.
    337  * @ trdid   : [in] thread identifier.
     329 * @ fdid    : [in] socket identifier.
    338330 * @ cycle   : [in] date (number of cycles).
    339331 * @ buf     : [in] local pointer on kernel buffer containing the packet.
    340332 *****************************************************************************************/
    341 void dev_nic_packet_display( bool_t    is_tx,
    342                              pid_t     pid,
    343                              trdid_t   trdid,
     333void dev_nic_packet_display( pid_t     pid,
     334                             uint32_t  fdid,
    344335                             uint32_t  cycle,
    345336                             uint8_t * buf );
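dev_nic_packet_display() decodes all fields directly from the raw byte buffer, using fixed offsets such as buf[37] for the low byte of the UDP destination port. The sketch below shows the underlying big-endian extraction, assuming the usual 14-byte Ethernet header and a 20-byte IPv4 header without options; read_be16() and read_be32() are hypothetical helpers, not kernel functions.

/* Sketch: reading big-endian header fields out of a raw ETH/IP/UDP frame,
 * in the style of dev_nic_packet_display().  Offsets assume a 14-byte
 * Ethernet header and a 20-byte IPv4 header without options.             */

#include <stdint.h>
#include <stdio.h>

static uint16_t read_be16( const uint8_t *buf, unsigned off )
{
    return (uint16_t)( ((uint16_t)buf[off] << 8) | (uint16_t)buf[off + 1] );
}

static uint32_t read_be32( const uint8_t *buf, unsigned off )
{
    return ((uint32_t)buf[off]     << 24) | ((uint32_t)buf[off + 1] << 16) |
           ((uint32_t)buf[off + 2] <<  8) |  (uint32_t)buf[off + 3];
}

int main( void )
{
    uint8_t buf[64] = { 0 };

    buf[34] = 0x1F; buf[35] = 0x90;      /* UDP src_port = 8080 (bytes 34-35) */
    buf[36] = 0x00; buf[37] = 0x35;      /* UDP dst_port = 53   (bytes 36-37) */

    printf( " - src_port [2] = %u\n", (unsigned)read_be16( buf, 34 ) );
    printf( " - dst_port [2] = %u\n", (unsigned)read_be16( buf, 36 ) );
    printf( " - ip_dst   [4] = %x\n", (unsigned)read_be32( buf, 30 ) );  /* IP dst addr */
    return 0;
}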
  • trunk/kernel/fs/devfs.c

    r673 r683  
    5656xptr_t devfs_ctx_alloc( cxy_t cxy )
    5757{
    58     kmem_req_t    req;
    59 
    60         req.type    = KMEM_KCM;
    61         req.order   = bits_log2( sizeof(devfs_ctx_t) );
    62     req.flags   = AF_KERNEL | AF_ZERO;
    63 
    6458    // allocates devfs context from target cluster
    65         return XPTR( cxy , kmem_remote_alloc( cxy , &req ) );
     59        void * ptr = kmem_remote_alloc( cxy,
     60                                    bits_log2(sizeof(devfs_ctx_t)),
     61                                    AF_ZERO );
     62
     63    if( ptr == NULL ) return XPTR_NULL;
     64        else              return XPTR( cxy , ptr );
    6665}
    6766
     
    9089void devfs_ctx_destroy( xptr_t  devfs_ctx_xp )
    9190{
    92     kmem_req_t    req;
    93 
    9491    // get cluster and local pointer on devfs context
    9592    devfs_ctx_t * devfs_ctx_ptr = GET_PTR( devfs_ctx_xp );
    9693    cxy_t         devfs_ctx_cxy = GET_CXY( devfs_ctx_xp );
    9794
    98     req.type = KMEM_KCM;
    99     req.ptr  = devfs_ctx_ptr;
    100 
    10195    // release devfs context descriptor to remote cluster
    102     kmem_remote_free( devfs_ctx_cxy , &req );
     96    kmem_remote_free( devfs_ctx_cxy,
     97                      devfs_ctx_ptr,
     98                      bits_log2(sizeof(devfs_ctx_t)) );
    10399}
    104100
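Throughout this changeset the kmem_req_t descriptor is replaced by direct calls of the form kmem_remote_alloc( cxy , order , flags ) and kmem_remote_free( cxy , ptr , order ), where the order is obtained with bits_log2( sizeof(type) ). The sketch below illustrates the order computation only; order_of() is a hypothetical stand-in for bits_log2(), assumed here to round the size up to the next power of two, and devfs_ctx_demo_t is a placeholder structure, not the real devfs_ctx_t.

/* Sketch: the new kmem interface takes the allocation size as an "order"
 * (log2 of the size in bytes).  order_of() is a hypothetical stand-in for
 * bits_log2(), assumed to round up to the next power of two.             */

#include <stdint.h>
#include <stdio.h>

static uint32_t order_of( uint32_t size )          /* ceil( log2(size) ) */
{
    uint32_t order = 0;
    while( (1u << order) < size ) order++;
    return order;
}

typedef struct devfs_ctx_demo {                    /* placeholder structure */
    uint32_t  type;
    void    * devfs_dev_inode;
    void    * devfs_external_inode;
} devfs_ctx_demo_t;

int main( void )
{
    uint32_t order = order_of( (uint32_t)sizeof(devfs_ctx_demo_t) );
    printf( "sizeof = %zu bytes -> order %u -> allocated %u bytes\n",
            sizeof(devfs_ctx_demo_t), (unsigned)order, 1u << order );
    return 0;
}

The same order must be passed back to kmem_remote_free(), as the devfs and fatfs code above now does.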
  • trunk/kernel/fs/fatfs.c

    r673 r683  
    16301630xptr_t  fatfs_ctx_alloc( cxy_t  cxy )
    16311631{
    1632     kmem_req_t    req;
    1633 
    16341632    // allocate memory from remote cluster
    1635         req.type     = KMEM_KCM;
    1636         req.order    = bits_log2( sizeof(fatfs_ctx_t) );
    1637     req.flags    = AF_KERNEL | AF_ZERO;
    1638  
    1639     return XPTR( cxy , kmem_remote_alloc( cxy , &req ) );
     1633    void * ptr = kmem_remote_alloc( cxy,
     1634                                    bits_log2(sizeof(fatfs_ctx_t)),
     1635                                    AF_ZERO );
     1636
     1637    if( ptr == NULL ) return XPTR_NULL;
     1638    else              return XPTR( cxy , ptr );
    16401639
    16411640}  // end fatfs_ctx_alloc()
     
    16451644{
    16461645    error_t       error;
    1647     kmem_req_t    req;
    16481646    cxy_t         cxy;             // FATFS context cluster identifier
    16491647    fatfs_ctx_t * fatfs_ctx_ptr;   // local pointer on FATFS context
     
    16671665    // allocate a 512 bytes buffer in remote cluster, used to store
    16681666    // temporarily the BOOT sector, and permanently the FS_INFO sector
    1669         req.type    = KMEM_KCM;
    1670     req.order   = 9;                    // 512 bytes
    1671     req.flags   = AF_KERNEL | AF_ZERO;
    1672         buffer      = kmem_remote_alloc( cxy , &req );
    1673 
     1667        buffer = kmem_remote_alloc( cxy,
     1668                                9,
     1669                                AF_ZERO );
    16741670    if( buffer == NULL )
    16751671    {
     
    18271823void fatfs_ctx_destroy( xptr_t  fatfs_ctx_xp )
    18281824{
    1829     kmem_req_t   req;
    18301825    mapper_t   * fat_mapper;
    18311826    uint8_t    * fs_info_buffer;
     
    18441839    fs_info_buffer = hal_remote_lpt( XPTR( fatfs_ctx_cxy , &fatfs_ctx_ptr->fs_info_buffer ) );
    18451840
    1846     // release FS_INFO buffer
    1847     req.type = KMEM_KCM;
    1848     req.ptr  = fs_info_buffer;
    1849     kmem_remote_free( fatfs_ctx_cxy , &req );
     1841    // release FS_INFO buffer (512 bytes)
     1842    kmem_remote_free( fatfs_ctx_cxy,
     1843                      fs_info_buffer,
     1844                      9 );               
    18501845
    18511846    // release FATFS context descriptor
    1852     req.type = KMEM_KCM;
    1853     req.ptr  = fatfs_ctx_ptr;
    1854     kmem_remote_free( fatfs_ctx_cxy , &req );
     1847    kmem_remote_free( fatfs_ctx_cxy,
     1848                      fatfs_ctx_ptr,
     1849                      bits_log2(sizeof(fatfs_ctx_t)) );
    18551850
    18561851}  // end fatfs_ctx_destroy()
     
    28572852
    28582853    // compute number of pages
    2859     npages = size >> CONFIG_PPM_PAGE_SHIFT;
     2854    npages = size >> CONFIG_PPM_PAGE_ORDER;
    28602855    if( size & CONFIG_PPM_PAGE_MASK ) npages++;
    28612856         
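The size-to-pages computation above is a ceiling division by the page size, expressed with a shift and a mask. A minimal sketch, assuming 4 KiB pages (CONFIG_PPM_PAGE_ORDER == 12 and CONFIG_PPM_PAGE_MASK == 0xFFF):

/* Sketch of the size -> npages computation used in fatfs.c and vfs.c,
 * assuming 4 KiB pages.  Note that vfs_inode_load_all_pages() also adds
 * one page when size == 0, to map empty files.                           */

#include <stdint.h>
#include <stdio.h>

#define PAGE_ORDER  12
#define PAGE_SIZE   (1u << PAGE_ORDER)
#define PAGE_MASK   (PAGE_SIZE - 1)

static uint32_t npages_for( uint32_t size )
{
    uint32_t npages = size >> PAGE_ORDER;      /* whole pages        */
    if( size & PAGE_MASK ) npages++;           /* partial last page  */
    return npages;
}

int main( void )
{
    printf( "%u\n", (unsigned)npages_for( 4096 ) );   /* 1 */
    printf( "%u\n", (unsigned)npages_for( 4097 ) );   /* 2 */
    printf( "%u\n", (unsigned)npages_for( 0 ) );      /* 0 */
    return 0;
}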
  • trunk/kernel/fs/vfs.c

    r673 r683  
    4848
    4949//////////////////////////////////////////////////////////////////////////////////////////
    50 //           Extern variables         
     50//           Extern global variables         
    5151//////////////////////////////////////////////////////////////////////////////////////////
    5252
     
    5454extern chdev_directory_t  chdev_dir;                  // allocated in kernel_init.c 
    5555extern char *             lock_type_str[];            // allocated in kernel_init.c
     56extern process_t          process_zero;               // allocated in kernel_init.c
    5657 
    5758///////////////////////////////////////////////////////////////////////////////////////////
     
    186187    uint32_t           inum;         // inode identifier (to be allocated)
    187188    vfs_ctx_t        * ctx;          // file system context
    188         kmem_req_t         req;          // request to kernel memory allocator
    189189    error_t            error;
    190190
     
    192192uint32_t       cycle      = (uint32_t)hal_get_cycles();
    193193thread_t *     this       = CURRENT_THREAD;
     194pid_t          pid        = this->process->pid;
     195trdid_t        trdid      = this->trdid;
    194196#endif
    195197
     
    202204
    203205#if DEBUG_VFS_ERROR
    204 if( DEBUG_VFS_ERROR < cycle )
    205 printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type\n",
    206 __FUNCTION__ , this->process->pid , this->trdid );
     206printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type / cycle %d\n",
     207__FUNCTION__ , pid , trdid, cycle );
    207208#endif
    208209        return -1;
     
    220221
    221222#if DEBUG_VFS_ERROR
    222 if( DEBUG_VFS_ERROR < cycle )
    223 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum\n",
    224 __FUNCTION__ , this->process->pid , this->trdid );
     223printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum / cycle %d\n",
     224__FUNCTION__ , pid , trdid, cycle );
    225225#endif
    226226        return -1;
     
    234234
    235235#if DEBUG_VFS_ERROR
    236 if( DEBUG_VFS_ERROR < cycle )
    237 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper\n",
    238 __FUNCTION__ , this->process->pid , this->trdid );
     236printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper / cycle %d\n",
     237__FUNCTION__ , pid , trdid, cycle );
    239238#endif
    240239        vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum );
     
    244243    mapper_ptr = GET_PTR( mapper_xp );
    245244
    246     // allocate one page for VFS inode descriptor
    247     // because the embedded "children" xhtab footprint
    248         req.type   = KMEM_PPM;
    249         req.order  = 0;
    250     req.flags  = AF_KERNEL | AF_ZERO;
    251         inode_ptr  = kmem_remote_alloc( cxy , &req );
    252 
     245    // allocate memory for inode descriptor
     246        inode_ptr  = kmem_remote_alloc( cxy,
     247                                    bits_log2(sizeof(vfs_inode_t)),
     248                                    AF_ZERO );
    253249    if( inode_ptr == NULL )
    254250    {
    255251
    256252#if DEBUG_VFS_ERROR
    257 if( DEBUG_VFS_ERROR < cycle )
    258 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode\n",
    259 __FUNCTION__ , this->process->pid , this->trdidi );
     253printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode / cycle %d\n",
     254__FUNCTION__ , pid , trdid, cycle );
    260255#endif
    261256        vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum );
     
    297292if( DEBUG_VFS_INODE_CREATE < cycle )
    298293printk("\n[%s] thread[%x,%x] created inode (%x,%x) / ctx %x / fs_type %d / cycle %d\n",
    299 __FUNCTION__, this->process->pid, this->trdid, cxy, inode_ptr, ctx, ctx->type, cycle );
     294__FUNCTION__, pid, trdid, cxy, inode_ptr, ctx, ctx->type, cycle );
    300295#endif
    301296 
     
    318313
    319314    // release memory allocated for inode descriptor
    320         kmem_req_t req;
    321         req.type  = KMEM_PPM;
    322         req.ptr   = inode_ptr;
    323         kmem_remote_free( inode_cxy , &req );
     315        kmem_remote_free( inode_cxy,
     316                      inode_ptr,
     317                      bits_log2(sizeof(vfs_inode_t)) );
    324318
    325319}  // end vfs_inode_destroy()
     
    447441    uint32_t   size   = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) );
    448442
    449 #if DEBUG_VFS_INODE_LOAD_ALL
    450 char       name[CONFIG_VFS_MAX_NAME_LENGTH];
     443#if DEBUG_VFS_INODE_LOAD_ALL || DEBUG_VFS_ERROR
    451444uint32_t   cycle = (uint32_t)hal_get_cycles();
    452445thread_t * this  = CURRENT_THREAD;
     446#endif
     447
     448#if DEBUG_VFS_INODE_LOAD_ALL
     449char name[CONFIG_VFS_MAX_NAME_LENGTH];
    453450vfs_inode_get_name( inode_xp , name );
    454451if( DEBUG_VFS_INODE_LOAD_ALL < cycle )
     
    458455
    459456    // compute number of pages
    460     uint32_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
     457    uint32_t npages = size >> CONFIG_PPM_PAGE_ORDER;
    461458    if( (size & CONFIG_PPM_PAGE_MASK) || (size == 0) ) npages++;
    462459
     
    468465        page_xp = mapper_get_page( XPTR( inode_cxy , mapper ), page_id );
    469466
    470         if( page_xp == XPTR_NULL ) return -1;
     467        if( page_xp == XPTR_NULL )
     468        {
     469
     470#if DEBUG_VFS_ERROR
     471printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate memory for mapper / cycle %d\n",
     472__FUNCTION__, this->process->pid, this->trdid, cycle );
     473#endif
     474            return -1;
     475        }
    471476    }
    472477
     
    534539                           xptr_t        * dentry_xp )
    535540{
    536         kmem_req_t       req;            // request to kernel memory allocator
    537541    vfs_ctx_t      * ctx = NULL;     // context descriptor
    538542    vfs_dentry_t   * dentry_ptr;     // dentry descriptor (to be allocated)
     
    557561
    558562#if DEBUG_VFS_ERROR
    559 if( DEBUG_VFS_ERROR < cycle )
    560 printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d\n",
    561 __FUNCTION__ , this->process->pid, this->trdid, fs_type );
     563printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d / cycle %d\n",
     564__FUNCTION__ , this->process->pid, this->trdid, fs_type, cycle );
    562565#endif
    563566        return -1;
     
    570573
    571574    // allocate memory for dentry descriptor
    572         req.type   = KMEM_KCM;
    573         req.order  = bits_log2( sizeof(vfs_dentry_t) );
    574     req.flags  = AF_KERNEL | AF_ZERO;
    575         dentry_ptr = kmem_remote_alloc( cxy , &req );
    576 
     575        dentry_ptr = kmem_remote_alloc( cxy,
     576                                    bits_log2(sizeof(vfs_dentry_t)),
     577                                    AF_ZERO );
    577578    if( dentry_ptr == NULL )
    578579    {
    579580
    580581#if DEBUG_VFS_ERROR
    581 if( DEBUG_VFS_ERROR < cycle )
    582 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor\n",
    583 __FUNCTION__ , this->process->pid, this->trdid );
     582printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor / cycle %d\n",
     583__FUNCTION__ , this->process->pid, this->trdid, cycle );
    584584#endif
    585585        return -1;
     
    616616 
    617617    // release memory allocated to dentry
    618         kmem_req_t req;
    619         req.type  = KMEM_KCM;
    620         req.ptr   = dentry_ptr;
    621         kmem_remote_free( dentry_cxy , &req );
     618        kmem_remote_free( dentry_cxy,
     619                      dentry_ptr,
     620                      bits_log2(sizeof(vfs_dentry_t)) );
    622621
    623622}  // end vfs_dentry_destroy()
     
    634633{
    635634    vfs_file_t   * file_ptr;
    636         kmem_req_t     req;
    637635    uint32_t       type;
    638636    mapper_t     * mapper;
     
    644642    cxy_t         inode_cxy = GET_CXY( inode_xp );
    645643
     644#if DEBUG_VFS_FILE_CREATE || DEBUG_VFS_ERROR
     645thread_t * this  = CURRENT_THREAD;
     646uint32_t   cycle = (uint32_t)hal_get_cycles();
     647#endif
     648
    646649#if DEBUG_VFS_FILE_CREATE
    647 thread_t * this = CURRENT_THREAD;
    648 uint32_t cycle = (uint32_t)hal_get_cycles();
    649650if( DEBUG_VFS_FILE_CREATE < cycle )
    650651printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n",
     
    653654
    654655    // allocate memory for new file descriptor
    655         req.type  = KMEM_KCM;
    656         req.order = bits_log2( sizeof(vfs_file_t) );
    657     req.flags = AF_KERNEL | AF_ZERO;
    658         file_ptr  = kmem_remote_alloc( inode_cxy , &req );
    659 
    660     if( file_ptr == NULL ) return -1;
     656        file_ptr  = kmem_remote_alloc( inode_cxy,
     657                                   bits_log2(sizeof(vfs_file_t)),
     658                                   AF_ZERO );
     659
     660    if( file_ptr == NULL )
     661    {
     662
     663#if DEBUG_VFS_ERROR
     664printk("\n[ERROR] in %s : thread[%x,%x] / cannot allocate memory / cycle %d\n",
     665__FUNCTION__ , this->process->pid, this->trdid, cycle );
     666#endif
     667        return -1;
     668    }
    661669
    662670    // get type, ctx, mapper, and buffer from inode descriptor
     
    718726
    719727    // release file descriptor
    720         kmem_req_t req;
    721         req.type  = KMEM_KCM;
    722         req.ptr   = file_ptr;
    723         kmem_remote_free( file_cxy , &req );
     728        kmem_remote_free( file_cxy,
     729                      file_ptr,
     730                      bits_log2(sizeof(vfs_file_t)) );
    724731
    725732#if DEBUG_VFS_FILE_DESTROY
     
    775782    xptr_t         lock_xp;        // extended pointer on Inode Tree lock
    776783
     784#if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR
     785uint32_t    cycle   = (uint32_t)hal_get_cycles();
     786thread_t  * this    = CURRENT_THREAD;
     787pid_t       pid     = this->process->pid;
     788trdid_t     trdid   = this->trdid;
     789#endif
     790
    777791    if( mode != 0 )
    778792    {
    779         printk("\n[ERROR] in %s : the mode parameter is not supported yet\n" );
     793
     794#if DEBUG_VFS_ERROR
     795printk("\n[ERROR] in %s : the mode parameter is not supported yet\n", __FUNCTION__ );
     796#endif
    780797        return -1;
    781798    }
    782 
    783     thread_t  * this    = CURRENT_THREAD;
    784     process_t * process = this->process;
    785799
    786800    // compute lookup working mode
     
    790804    if( (flags & O_EXCL   )      )  lookup_mode |= VFS_LOOKUP_EXCL;
    791805 
    792 #if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR
    793 uint32_t cycle = (uint32_t)hal_get_cycles();
    794 #endif
    795 
    796806#if DEBUG_VFS_OPEN
    797807if( DEBUG_VFS_OPEN < cycle )
    798808printk("\n[%s] thread[%x,%x] enter for <%s> / root_inode (%x,%x) / cycle %d\n",
    799 __FUNCTION__, process->pid, this->trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle );
     809__FUNCTION__, pid, trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle );
    800810#endif
    801811
     
    809819
    810820    // build extended pointer on lock protecting Inode Tree
    811     vfs_root_xp  = process->vfs_root_xp;
     821    vfs_root_xp  = process_zero.vfs_root_xp;
    812822    vfs_root_ptr = GET_PTR( vfs_root_xp );
    813823    vfs_root_cxy = GET_CXY( vfs_root_xp );
     
    831841
    832842#if DEBUG_VFS_ERROR
    833 if( DEBUG_VFS_ERROR < cycle )
    834 printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s>\n",
    835 __FUNCTION__ , process->pid, this->trdid , path );
     843printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s> / cycle %d\n",
     844__FUNCTION__ , pid, trdid , path , cycle );
    836845#endif
    837846        return -1;
     
    843852   
    844853#if (DEBUG_VFS_OPEN & 1)
    845 cycle = (uint32_t)hal_get_cycles();
    846854if( DEBUG_VFS_OPEN < cycle )
    847855printk("\n[%s] thread[%x,%x] found inode(%x,%x) for <%s>\n",
    848 __FUNCTION__, process->pid, this->trdid, inode_cxy, inode_ptr, path );
     856__FUNCTION__, pid, trdid, inode_cxy, inode_ptr, path );
    849857#endif
    850858
     
    852860    error = vfs_file_create( inode_xp , file_attr , &file_xp );
    853861
    854     if( error )  return error;
     862    if( error )
     863    {
     864
     865#if DEBUG_VFS_ERROR
     866printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor for <%s> / cycle %d\n",
     867__FUNCTION__ , pid, trdid , path , cycle );
     868#endif
     869        return error;
     870    }
    855871
    856872#if (DEBUG_VFS_OPEN & 1)
    857 cycle = (uint32_t)hal_get_cycles();
    858873if( DEBUG_VFS_OPEN < cycle )
    859874printk("\n[%s] thread[%x,%x] created file descriptor (%x,%x) for <%s>\n",
    860 __FUNCTION__, process->pid, this->trdid, GET_CXY(file_xp), GET_PTR(file_xp), path );
     875__FUNCTION__, pid, trdid, GET_CXY(file_xp), GET_PTR(file_xp), path );
    861876#endif
    862877
     
    864879    error = process_fd_register( process_xp , file_xp , &file_id );
    865880
    866     if( error ) return error;
     881    if( error )
     882    {
     883
     884#if DEBUG_VFS_ERROR
     885printk("\n[ERROR] in %s : thread[%x,%x] cannot register file descriptor for <%s> / cycle %d\n",
     886__FUNCTION__ , pid, trdid , path , cycle );
     887#endif
     888        return error;
     889    }
    867890
    868891    // get new file descriptor cluster and local pointer
     
    891914if( DEBUG_VFS_OPEN < cycle )
    892915printk("\n[%s] thread[%x,%x] exit for <%s> / fdid %d / file(%x,%x) / cycle %d\n",
    893 __FUNCTION__, process->pid, this->trdid, path, file_id,
     916__FUNCTION__, pid, trdid, path, file_id,
    894917GET_CXY( file_xp ), GET_PTR( file_xp ), cycle );
    895918#endif
     
    9971020
    9981021#if DEBUG_VFS_ERROR
    999 if( DEBUG_VFS_ERROR < cycle )
    1000 printk("\n[ERROR] in %s thread[%x,%x] cannot move data",
    1001 __FUNCTION__, this->process->pid, this->trdid );
     1022printk("\n[ERROR] in %s thread[%x,%x] cannot move data / cycle %d",
     1023__FUNCTION__, this->process->pid, this->trdid, cycle );
    10021024#endif
    10031025        return -1;
     
    10081030
    10091031#if DEBUG_VFS_USER_MOVE
    1010 cycle = (uint32_t)hal_get_cycles();
    10111032if( cycle > DEBUG_VFS_USER_MOVE )
    10121033{
     
    10321053    cxy_t              file_cxy;     // remote file descriptor cluster
    10331054    vfs_file_t       * file_ptr;     // remote file descriptor local pointer
    1034     vfs_file_type_t   inode_type;   // remote file type
    10351055    uint32_t           file_offset;  // current offset in file
    10361056    mapper_t         * mapper_ptr;   // remote mapper local pointer
     
    10411061assert( __FUNCTION__, (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" );
    10421062
     1063#if DEBUG_VFS_KERNEL_MOVE || DEBUG_VFS_ERROR
     1064uint32_t      cycle      = (uint32_t)hal_get_cycles();
     1065thread_t    * this       = CURRENT_THREAD;
     1066#endif
     1067
    10431068    // get cluster and local pointer on remote file descriptor
    10441069    file_cxy  = GET_CXY( file_xp );
    10451070    file_ptr  = GET_PTR( file_xp );
    1046 
    1047     // get inode type from remote file descriptor
    1048     inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type   ) );
    1049 
    1050 // check inode type
    1051 assert( __FUNCTION__, (inode_type == FILE_TYPE_REG), "bad file type" );
    10521071
    10531072    // get mapper pointers and file offset from file descriptor
     
    10641083    if( error )
    10651084    {
    1066         printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ );
     1085
     1086#if DEBUG_VFS_ERROR
     1087printk("\n[ERROR] in %s : thread[%x,%x] / cannot move data / cycle %d\n",
     1088__FUNCTION__ , this->process->pid , this->trdid , cycle );
     1089#endif
    10671090        return -1;
     1091
    10681092    }
    10691093
    10701094#if DEBUG_VFS_KERNEL_MOVE
    10711095char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    1072 uint32_t      cycle      = (uint32_t)hal_get_cycles();
    1073 thread_t    * this       = CURRENT_THREAD;
    10741096cxy_t         buffer_cxy = GET_CXY( buffer_xp );
    10751097void        * buffer_ptr = GET_PTR( buffer_xp );
     
    11091131assert( __FUNCTION__, (new_offset != NULL )  , "new_offset == NULL" );
    11101132
     1133#if DEBUG_VFS_LSEEK || DEBUG_VFS_ERROR
     1134uint32_t   cycle = (uint32_t)hal_get_cycles();
     1135thread_t * this  = CURRENT_THREAD;
     1136#endif
     1137
    11111138    // get cluster and local pointer on remote file descriptor
    11121139    file_cxy = GET_CXY( file_xp );
     
    11381165    else
    11391166    {
    1140         printk("\n[ERROR] in %s : illegal whence value\n", __FUNCTION__ );
     1167
     1168#if DEBUG_VFS_ERROR
     1169printk("\n[ERROR] in %s : thread[%x,%x] / undefined whence value / cycle %d",
     1170__FUNCTION__ , this->process->pid , this->trdid , cycle );
     1171#endif
    11411172        remote_rwlock_wr_release( lock_xp );
    11421173        return -1;
     
    11911222    cluster_t * cluster = LOCAL_CLUSTER;
    11921223
     1224#if DEBUG_VFS_CLOSE || DEBUG_VFS_ERROR
     1225uint32_t  cycle = (uint32_t)hal_get_cycles();
     1226#endif
     1227
    11931228    // get file name
    11941229    vfs_file_get_name( file_xp , name );
    11951230   
    11961231#if DEBUG_VFS_CLOSE
    1197 uint32_t cycle = (uint32_t)hal_get_cycles();
    11981232if( DEBUG_VFS_CLOSE < cycle )
    11991233printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
     
    12151249    if( error )
    12161250    {
    1217         printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n",
    1218         __FUNCTION__, name );
     1251
     1252#if DEBUG_VFS_ERROR
     1253printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise dirty pages for <%s> / cycle %d\n",
     1254__FUNCTION__ , this->process->pid , this->trdid , name , cycle );
     1255#endif
    12191256        return -1;
    12201257    }
     
    12221259#if DEBUG_VFS_CLOSE
    12231260if( DEBUG_VFS_CLOSE < cycle )
    1224 printk("\n[%s] thread[%x,%x] synchronised mapper of <%s> to device\n",
     1261printk("\n[%s] thread[%x,%x] synchronised <%s> mapper to device\n",
    12251262__FUNCTION__, process->pid, this->trdid, name );
    12261263#endif
     
    12591296        if( error )
    12601297        {
    1261             printk("\n[ERROR] in %s : cannot update size in parent\n",
    1262             __FUNCTION__ );
     1298
     1299#if DEBUG_VFS_ERROR
     1300printk("\n[ERROR] in %s : thread[%x,%x] / cannot update size in parent / cycle %d\n",
     1301__FUNCTION__ , this->process->pid , this->trdid , cycle );
     1302#endif
    12631303            return -1;
    12641304        }
     
    12771317        if( error )
    12781318        {
    1279             printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n",
    1280             __FUNCTION__ );
     1319
     1320#if DEBUG_VFS_ERROR
      1321printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise parent mapper to device / cycle %d\n",
     1322__FUNCTION__ , this->process->pid , this->trdid , cycle );
     1323#endif
    12811324            return -1;
    12821325        }
     
    13671410    char           last_name[CONFIG_VFS_MAX_NAME_LENGTH];
    13681411
     1412#if DEBUG_VFS_MKDIR || DEBUG_VFS_ERROR
     1413uint32_t  cycle = (uint32_t)hal_get_cycles();
     1414#endif
     1415
    13691416    thread_t  * this    = CURRENT_THREAD;
    13701417    process_t * process = this->process;
     
    13731420char root_name[CONFIG_VFS_MAX_NAME_LENGTH];
    13741421vfs_inode_get_name( root_xp , root_name );
    1375 uint32_t   cycle = (uint32_t)hal_get_cycles();
    13761422if( DEBUG_VFS_MKDIR < cycle )
    13771423printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n",
     
    13961442    if( error )
    13971443    {
     1444
     1445#if DEBUG_VFS_ERROR
     1446printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n",
     1447__FUNCTION__, process->pid, this->trdid, path , cycle );
     1448#endif
    13981449        remote_rwlock_wr_release( lock_xp );
    1399         printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",
    1400         __FUNCTION__, path );
    14011450        return -1;
    14021451    }
     
    14231472    if( error )
    14241473    {
     1474
     1475#if DEBUG_VFS_ERROR
     1476printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry in cluster %x for <%s> / cycle %d\n",
     1477__FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle );
     1478#endif
    14251479        remote_rwlock_wr_release( lock_xp );
    1426         printk("\n[ERROR] in %s : cannot create new dentry in cluster %x for <%s>\n",
    1427         __FUNCTION__, parent_cxy, path );
    14281480        return -1;
    14291481    }
     
    14571509    if( error )
    14581510    {
     1511
     1512#if DEBUG_VFS_ERROR
     1513printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x for <%s> / cycle %d\n",
     1514__FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle );
     1515#endif
    14591516        remote_rwlock_wr_release( lock_xp );
    1460         printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",
    1461          __FUNCTION__ , inode_cxy , path );
    14621517        vfs_dentry_destroy( dentry_xp );
    14631518        return -1;
     
    15041559    if( error )
    15051560    {
     1561
     1562#if DEBUG_VFS_ERROR
     1563printk("\n[ERROR] in %s : thread[%x,%x] cannot create <.> & <..> dentries for <%s> / cycle %d\n",
     1564__FUNCTION__, process->pid, this->trdid, path , cycle );
     1565#endif
     1566        vfs_remove_child_from_parent( dentry_xp );
    15061567        remote_rwlock_wr_release( lock_xp );
    1507         printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",
    1508         __FUNCTION__ , inode_cxy , path );
    1509         vfs_dentry_destroy( dentry_xp );
    15101568        return -1;
    15111569    }
     
    15201578    if( error )
    15211579    {
    1522         printk("\n[ERROR] in %s : cannot update parent directory for <%s>\n",
    1523         __FUNCTION__, path );
     1580
     1581#if DEBUG_VFS_ERROR
     1582printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n",
     1583__FUNCTION__, process->pid, this->trdid, path , cycle );
     1584#endif
     1585        vfs_remove_child_from_parent( dentry_xp );
    15241586        return -1;
    15251587    }
     
    15271589#if(DEBUG_VFS_MKDIR & 1)
    15281590if( DEBUG_VFS_MKDIR < cycle )
    1529 printk("\n[%s] thread[%x,%x] updated parent dir (mapper and IOC) for <%s>\n",
     1591printk("\n[%s] thread[%x,%x] created <%s> dir (Inode-Tree, Mapper and IOC)\n",
    15301592__FUNCTION__, process->pid, this->trdid, path );
    15311593#endif
     
    15651627    char           new_name[CONFIG_VFS_MAX_NAME_LENGTH];
    15661628
     1629#if DEBUG_VFS_LINK || DEBUG_VFS_ERROR
     1630uint32_t  cycle = (uint32_t)hal_get_cycles();
     1631#endif
     1632
    15671633    thread_t  * this    = CURRENT_THREAD;
    15681634    process_t * process = this->process;
     
    15731639vfs_inode_get_name( old_root_xp , old_root_name );
    15741640vfs_inode_get_name( new_root_xp , new_root_name );
    1575 uint32_t   cycle = (uint32_t)hal_get_cycles();
    15761641if( DEBUG_VFS_LINK < cycle )
    15771642printk("\n[%s] thread[%x,%x] enter / old_root <%s> / old_path <%s> / "
     
    15981663    if( error )
    15991664    {
     1665
     1666#if DEBUG_VFS_ERROR
     1667printk("\n[ERROR] in %s : thread[%x,%x] cannot get target inode for <%s> / cycle %d\n",
     1668__FUNCTION__, process->pid, this->trdid, old_path , cycle );
     1669#endif
    16001670        remote_rwlock_wr_release( lock_xp );
    1601         printk("\n[ERROR] in %s : cannot get target inode for <%s>\n",
    1602         __FUNCTION__, old_path );
    16031671        return -1;
    16041672    }
     
    16191687    if( error )
    16201688    {
     1689
     1690#if DEBUG_VFS_ERROR
     1691printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n",
     1692__FUNCTION__, process->pid, this->trdid, new_path , cycle );
     1693#endif
    16211694        remote_rwlock_wr_release( lock_xp );
    1622         printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",
    1623         __FUNCTION__, new_path );
    16241695        return -1;
    16251696    }
     
    16551726        if( error )
    16561727        {
     1728
     1729#if DEBUG_VFS_ERROR
     1730printk("\n[ERROR] in %s : thread[%x,%x] cannot create new dentry for <%s> / cycle %d\n",
     1731__FUNCTION__, process->pid, this->trdid, new_path , cycle );
     1732#endif
    16571733            remote_rwlock_wr_release( lock_xp );
    1658             printk("\n[ERROR] in %s : cannot create new dentry for <%s>\n",
    1659             __FUNCTION__, new_path );
    16601734            return -1;
    16611735        }
     
    16961770        if( error )
    16971771        {
    1698             printk("\n[ERROR] in %s : cannot update new parent directory for <%s>\n",
    1699             __FUNCTION__, new_path );
     1772
     1773#if DEBUG_VFS_ERROR
     1774printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n",
     1775__FUNCTION__, process->pid, this->trdid, new_path , cycle );
     1776#endif
    17001777            return -1;
    17011778        }
     
    17101787    else
    17111788    {
    1712         // release the lock protecting Inode Tree
     1789
     1790#if DEBUG_VFS_ERROR
     1791printk("\n[ERROR] in %s : thread[%x,%x] / unsupported inode type %s / cycle %d\n",
     1792__FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), cycle );
     1793#endif
    17131794        remote_rwlock_wr_release( lock_xp );
    1714 
    1715         printk("\n[ERROR] in %s : unsupported inode type %s\n",
    1716         __FUNCTION__ , vfs_inode_type_str( inode_type ) );
    17171795        return -1;
    17181796    }
     
    17461824    char              parent_name[CONFIG_VFS_MAX_NAME_LENGTH];  // name of parent directory
    17471825
     1826#if DEBUG_VFS_UNLINK || DEBUG_VFS_ERROR
     1827uint32_t  cycle = (uint32_t)hal_get_cycles();
     1828#endif
     1829
    17481830    thread_t  * this    = CURRENT_THREAD;
    17491831    process_t * process = this->process;
    17501832
    17511833#if DEBUG_VFS_UNLINK
    1752 uint32_t   cycle = (uint32_t)hal_get_cycles();
    17531834char root_name[CONFIG_VFS_MAX_NAME_LENGTH];
    17541835vfs_inode_get_name( root_xp , root_name );
     
    17751856    if( error )
    17761857    {
     1858
     1859#if DEBUG_VFS_ERROR
     1860printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n",
     1861__FUNCTION__, process->pid, this->trdid, path , cycle );
     1862#endif
    17771863        remote_rwlock_wr_release( lock_xp );
    1778         printk("\n[ERROR] in %s : cannot get parent inode for <%s> in <%s>\n",
    1779         __FUNCTION__, child_name, path );
    17801864        return -1;
    17811865    }
     
    18241908        if( error )
    18251909        {
    1826             printk("\n[ERROR] in %s : cannot create inode <%s> in Inode Tree\n",
    1827             __FUNCTION__ , child_name );
     1910
     1911#if DEBUG_VFS_ERROR
     1912printk("\n[ERROR] in %s : thread[%x,%x] cannot create node <%s> in Inode_Tree / cycle %d\n",
     1913__FUNCTION__, process->pid, this->trdid, path, cycle );
     1914#endif
     1915            remote_rwlock_wr_release( lock_xp );
    18281916            return -1;
    18291917        }
     
    18391927        if ( error )
    18401928        {
    1841             printk("\n[ERROR] in %s : cannot get entry <%s> in parent <%s> mapper\n",
    1842             __FUNCTION__ , child_name, parent_name );
     1929
     1930#if DEBUG_VFS_ERROR
      1931printk("\n[ERROR] in %s : thread[%x,%x] cannot get dentry <%s> in parent <%s> mapper / cycle %d\n",
     1932__FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle );
     1933#endif
     1934            remote_rwlock_wr_release( lock_xp );
    18431935            return -1;
    18441936        }
     
    18611953    }
    18621954
    1863     // At this point the Inode Tree contains the target dentry and child inode
     1955    // At this point the Inode-Tree contains the parent dentry and child inode
    18641956    // we can safely remove this dentry from both the parent mapper, and the Inode Tree.
    18651957
     
    18971989            if( inode_children != 0 )
    18981990            {
     1991
     1992#if DEBUG_VFS_ERROR
     1993printk("\n[ERROR] in %s : thread[%x,%x] cannot remove <%s> inode that has children / cycle %d\n",
     1994__FUNCTION__, process->pid, this->trdid, path, cycle );
     1995#endif
    18991996                remote_rwlock_wr_release( lock_xp );
    1900                 printk("\n[ERROR] in %s : cannot remove <%s> inode that has children\n",
    1901                 __FUNCTION__, path );
    19021997                return -1;
    19031998            }
     
    19082003            if( error )
    19092004            {
     2005
     2006#if DEBUG_VFS_ERROR
      2007printk("\n[ERROR] in %s : thread[%x,%x] cannot update FAT mapper to remove <%s> / cycle %d\n",
     2008__FUNCTION__, process->pid, this->trdid, path, cycle );
     2009#endif
    19102010                remote_rwlock_wr_release( lock_xp );
    1911                 printk("\n[ERROR] in %s : cannot update FAT mapper to remove <%s> inode\n",
    1912                 __FUNCTION__ , path );
    19132011                return -1;
    19142012            }
     
    19272025        if( error )
    19282026        {
     2027
     2028#if DEBUG_VFS_ERROR
      2029printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory on IOC for <%s> / cycle %d\n",
     2030__FUNCTION__, process->pid, this->trdid, path, cycle );
     2031#endif
    19292032            remote_rwlock_wr_release( lock_xp );
    1930             printk("\n[ERROR] in %s : cannot update dentry on device for <%s>\n",
    1931             __FUNCTION__ , path );
    19322033            return -1;
    19332034        }
     
    19792080    else
    19802081    {
     2082
     2083#if DEBUG_VFS_ERROR
      2084printk("\n[ERROR] in %s : thread[%x,%x] unsupported inode type %s for <%s> / cycle %d\n",
     2085__FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), path, cycle );
     2086#endif
    19812087        remote_rwlock_wr_release( lock_xp );
    1982         printk("\n[ERROR] in %s : unsupported inode type %s\n",
    1983         __FUNCTION__ , vfs_inode_type_str( inode_type ) );
    19842088        return -1;
    19852089    }
     
    20042108    process_t * process = this->process;
    20052109
     2110#if DEBUG_VFS_STAT || DEBUG_VFS_ERROR
     2111uint32_t  cycle = (uint32_t)hal_get_cycles();
     2112#endif
     2113
    20062114    // build extended pointer on lock protecting Inode Tree (in VFS root inode)
    20072115    vfs_root_xp  = process->vfs_root_xp;
     
    20252133    if( error )
    20262134    {
    2027         printk("\n[ERROR] in %s : cannot found inode <%s>\n",
    2028         __FUNCTION__ , path );
     2135
     2136#if DEBUG_VFS_ERROR
      2137printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n",
     2138__FUNCTION__, process->pid, this->trdid, path, cycle );
     2139#endif
    20292140        return -1;
    20302141    }
     
    20502161
    20512162#if DEBUG_VFS_STAT
    2052 uint32_t cycle  = (uint32_t)hal_get_cycles();
    20532163if( DEBUG_VFS_STAT < cycle )
    2054 printk("\n[%s] thread[%x,%x] set stat %x for inode %x in cluster %x / cycle %d\n"
    2055        " %s / inum %d / size %d\n",
    2056 __FUNCTION__, process->pid, this->trdid, st, inode_ptr, inode_cxy, cycle,
    2057 vfs_inode_type_str( type ), inum, size );
     2164printk("\n[%s] thread[%x,%x] set stat for <%s> / %s / inum %d / size %d / cycle %d\n",
     2165__FUNCTION__, process->pid, this->trdid, path, vfs_inode_type_str( type ), inum, size, cycle );
    20582166#endif
    20592167
     
    20842192    process_t * process = this->process;
    20852193
    2086 #if DEBUG_VFS_CHDIR
    2087 uint32_t cycle = (uint32_t)hal_get_cycles();
    2088 if( DEBUG_VFS_CHDIR < cycle )
    2089 printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle %d\n",
    2090 __FUNCTION__, process->pid, this->trdid, path, cycle );
     2194#if DEBUG_VFS_CHDIR || DEBUG_VFS_ERROR
     2195uint32_t  cycle = (uint32_t)hal_get_cycles();
    20912196#endif
    20922197
     
    21122217    if( error )
    21132218    {
    2114         printk("\n[ERROR] in %s : <%s> not found\n",
    2115         __FUNCTION__, path );
     2219
     2220#if DEBUG_VFS_ERROR
      2221printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n",
     2222__FUNCTION__, process->pid, this->trdid, path, cycle );
     2223#endif
    21162224        return -1;
    21172225    }
     
    21242232    if( inode_type != FILE_TYPE_DIR )
    21252233    {
    2126         printk("\n[ERROR] in %s : <%s> is not a directory\n",
    2127         __FUNCTION__, path );
     2234
     2235#if DEBUG_VFS_ERROR
     2236printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory / cycle %d\n",
     2237__FUNCTION__, process->pid, this->trdid, path, cycle );
     2238#endif
    21282239        return -1;
    21292240    }
     
    21462257
    21472258#if DEBUG_VFS_CHDIR
    2148 cycle = (uint32_t)hal_get_cycles();
    21492259if( DEBUG_VFS_CHDIR < cycle )
    2150 printk("\n[%s] thread[%x,%x] exit : inode (%x,%x) / &cwd_xp (%x,%x) / cycle %d\n",
    2151 __FUNCTION__, process->pid, this->trdid, inode_cxy, inode_ptr,
    2152 GET_CXY(cwd_xp_xp), GET_PTR(cwd_xp_xp), cycle );
     2260printk("\n[%s] thread[%x,%x] set new cwd <%s> / inode_xp (%x,%x) / cycle %d\n",
     2261__FUNCTION__, process->pid, this->trdid, path, inode_cxy, inode_ptr, cycle );
    21532262#endif
    21542263
     
    21632272{
    21642273    error_t           error;
    2165     xptr_t            inode_xp;     // extended pointer on target inode
    2166     cxy_t             inode_cxy;    // inode cluster identifier       
    2167     vfs_inode_t     * inode_ptr;    // inode local pointer
    2168 
    2169 // check lookup working mode
    2170 assert( __FUNCTION__, (rights == 0), "access rights non implemented yet" );
    2171  
     2274    xptr_t            vfs_root_xp;        // extended pointer on VFS root inode
     2275    vfs_inode_t     * vfs_root_ptr;       // local_pointer on VFS root inode
     2276    cxy_t             vfs_root_cxy;       // VFS root inode cluster identifier
     2277    xptr_t            main_lock_xp;       // extended pointer on lock protecting Inode Tree
     2278    xptr_t            inode_xp;           // extended pointer on target inode
     2279    cxy_t             inode_cxy;          // inode cluster identifier       
     2280    vfs_inode_t     * inode_ptr;          // inode local pointer
     2281    vfs_file_type_t   inode_type;         // inode type   
     2282
     2283    thread_t  * this    = CURRENT_THREAD;
     2284    process_t * process = this->process;
     2285
     2286#if DEBUG_VFS_CHMOD || DEBUG_VFS_ERROR
     2287uint32_t  cycle = (uint32_t)hal_get_cycles();
     2288#endif
     2289
     2290    // build extended pointer on lock protecting Inode Tree (in VFS root inode)
     2291    vfs_root_xp  = process->vfs_root_xp;
     2292    vfs_root_ptr = GET_PTR( vfs_root_xp );
     2293    vfs_root_cxy = GET_CXY( vfs_root_xp );
     2294    main_lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock );
     2295
     2296    // take lock protecting Inode Tree in read mode
     2297    remote_rwlock_rd_acquire( main_lock_xp );
     2298
    21722299    // get extended pointer on target inode
    21732300    error = vfs_lookup( cwd_xp,
     
    21772304                        NULL );
    21782305
    2179     if( error ) return error;
     2306    // release lock protecting Inode Tree in read mode
     2307    remote_rwlock_rd_release( main_lock_xp );
     2308
     2309    if( error )
     2310    {
     2311
     2312#if DEBUG_VFS_ERROR
      2313printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n",
     2314__FUNCTION__, process->pid, this->trdid, path, cycle );
     2315#endif
     2316        return -1;
     2317    }
    21802318
    21812319    // get inode cluster and local pointer
     
    21842322   
    21852323    // get inode type from remote inode
    2186     // inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) );
     2324    inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) );
    21872325
    21882326    // TODO finalize implementation
    21892327
    2190 assert( __FUNCTION__, false , "not implemented" );
     2328assert( __FUNCTION__, false , "not fully implemented" );
    21912329
    21922330    // set inode rights in remote inode
    21932331    hal_remote_s32( XPTR( inode_cxy , &inode_ptr->rights ) , rights );
     2332
     2333#if DEBUG_VFS_CHMOD
     2334if( DEBUG_VFS_CHMOD < cycle )
     2335printk("\n[%s] thread[%x,%x] set access rights %x for <%s> / inode_xp (%x,%x) / cycle %d\n",
     2336__FUNCTION__, process->pid, this->trdid, rights, path, inode_cxy, inode_ptr, cycle );
     2337#endif
    21942338
    21952339    return 0;
     
    22122356    thread_t    * this    = CURRENT_THREAD;
    22132357    process_t   * process = this->process;
     2358
     2359#if DEBUG_VFS_MKFIFO || DEBUG_VFS_ERROR
     2360uint32_t  cycle = (uint32_t)hal_get_cycles();
     2361#endif
    22142362
    22152363    // build extended pointer on lock protecting Inode Tree
     
    22302378    if( error )
    22312379    {
    2232         printk("\n[ERROR] in %s : cannot get parent inode for <%s> path\n",
    2233         __FUNCTION__ , path );
     2380
     2381#if DEBUG_VFS_ERROR
      2382printk("\n[ERROR] in %s : thread[%x,%x] cannot find parent inode for <%s> / cycle %d\n",
     2383__FUNCTION__, process->pid, this->trdid, path, cycle );
     2384#endif
     2385        remote_rwlock_wr_release( vfs_lock_xp );
    22342386        return -1;
    22352387    }
     
    22592411    if( error )
    22602412    {
    2261         printk("\n[ERROR] in %s : cannot create fifo inode for <%s> path\n",
    2262         __FUNCTION__ , path );
     2413
     2414#if DEBUG_VFS_ERROR
     2415printk("\n[ERROR] in %s : thread[%x,%x] cannot create fifo inode for <%s> / cycle %d\n",
     2416__FUNCTION__, process->pid, this->trdid, path, cycle );
     2417#endif
     2418        remote_rwlock_wr_release( vfs_lock_xp );
    22632419        return -1;
    22642420    }
     
    22702426    if( pipe == NULL )
    22712427    {
    2272         printk("\n[ERROR] in %s : cannot create pipe for <%s> path\n",
    2273         __FUNCTION__ , path );
     2428
     2429#if DEBUG_VFS_ERROR
     2430printk("\n[ERROR] in %s : thread[%x,%x] cannot create pipe for <%s> / cycle %d\n",
     2431__FUNCTION__, process->pid, this->trdid, path, cycle );
     2432#endif
     2433        vfs_remove_child_from_parent( fifo_dentry_xp );
     2434        remote_rwlock_wr_release( vfs_lock_xp );
    22742435        return -1;
    22752436    }
     
    22822443    // release the lock protecting the Inode-Tree from write mode
    22832444    remote_rwlock_wr_release( vfs_lock_xp );
     2445
      2446#if DEBUG_VFS_MKFIFO
      2447if( DEBUG_VFS_MKFIFO < cycle )
      2448printk("\n[%s] thread[%x,%x] created fifo <%s> / inode_xp (%x,%x) / cycle %d\n",
     2449__FUNCTION__, process->pid, this->trdid, path, fifo_cxy, fifo_inode_ptr, cycle );
     2450#endif
    22842451
    22852452    return 0;
     
    27462913
    27472914#if DEBUG_VFS_ERROR 
    2748 if( DEBUG_VFS_ERROR < cycle )
    2749 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s>\n",
    2750 __FUNCTION__ , process->pid, this->trdid, name, pathname );
     2915printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s> / cycle %d\n",
     2916__FUNCTION__ , process->pid, this->trdid, name, pathname, cycle );
    27512917#endif
    27522918                    return -1;
     
    27772943
    27782944#if DEBUG_VFS_ERROR
    2779 if( DEBUG_VFS_ERROR < cycle )
    2780 printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir\n",
    2781 __FUNCTION__, process->pid, this->trdid, name );
     2945printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir / cycle %d\n",
     2946__FUNCTION__, process->pid, this->trdid, name, cycle );
    27822947#endif
    27832948                            vfs_remove_child_from_parent( dentry_xp );
     
    27952960
    27962961#if DEBUG_VFS_ERROR     
    2797 if( DEBUG_VFS_ERROR < cycle )
    2798 printk("\n[ERROR] in %s : thread[%x,%x] cannot found node <%s> in parent for <%s>\n",
    2799 __FUNCTION__ , process->pid, this->trdid, name, pathname );
      2962printk("\n[ERROR] in %s : thread[%x,%x] cannot find node <%s> in parent for <%s> / cycle %d\n",
     2963__FUNCTION__ , process->pid, this->trdid, name, pathname, cycle );
    28002964#endif
    28012965                        vfs_remove_child_from_parent( dentry_xp );
     
    28102974
    28112975#if DEBUG_VFS_ERROR
    2812 if( DEBUG_VFS_ERROR < cycle )
    2813 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s>  %\n",
    2814 __FUNCTION__ , process->pid, this->trdid, pathname );
     2976printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n",
     2977__FUNCTION__ , process->pid, this->trdid, pathname, cycle );
    28152978#endif
    28162979                       return -1;
     
    28312994                        {
    28322995#if DEBUG_VFS_ERROR
    2833 if( DEBUG_VFS_ERROR < cycle )
    2834 printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device\n",
    2835 __FUNCTION__ , process->pid, this->trdid, name );
     2996printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device / cycle %d\n",
     2997__FUNCTION__ , process->pid, this->trdid, name, cycle );
    28362998#endif
    28372999                            vfs_remove_child_from_parent( dentry_xp );
     
    28643026
    28653027#if DEBUG_VFS_ERROR
    2866 if( DEBUG_VFS_ERROR < cycle )
    2867 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s>\n",
    2868 __FUNCTION__ , process->pid, this->trdid, pathname );
     3028printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n",
     3029__FUNCTION__ , process->pid, this->trdid, pathname, cycle );
    28693030#endif
    28703031                return -1;
     
    29463107    xptr_t          children_entry_xp; // extended pointer on dentry "children" field
    29473108
     3109#if DEBUG_VFS_ADD_SPECIAL || DEBUG_VFS_ERROR
     3110uint32_t    cycle   = (uint32_t)hal_get_cycles();
     3111thread_t  * this    = CURRENT_THREAD;
     3112process_t * process = this->process;
     3113#endif
     3114
    29483115#if DEBUG_VFS_ADD_SPECIAL
    2949 uint32_t   cycle = (uint32_t)hal_get_cycles();
    2950 thread_t * this  = CURRENT_THREAD;
    29513116char child_name[CONFIG_VFS_MAX_NAME_LENGTH];
    29523117char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     
    29553120if( DEBUG_VFS_ADD_SPECIAL < cycle )
    29563121printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
    2957 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );
     3122__FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle );
    29583123#endif
    29593124
     
    29733138    if( error )
    29743139    {
    2975         printk("\n[ERROR] in %s : cannot create dentry <.> in cluster %x\n",
    2976         __FUNCTION__ , child_cxy );
     3140
     3141#if DEBUG_VFS_ERROR
     3142printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <.> in cluster %x / cycle %d\n",
     3143__FUNCTION__ , process->pid, this->trdid, child_cxy, cycle );
     3144#endif
    29773145        return -1;
    29783146    }
     
    29823150
    29833151#if(DEBUG_VFS_ADD_SPECIAL & 1)
    2984 cycle = (uint32_t)hal_get_cycles();
    29853152if( DEBUG_VFS_ADD_SPECIAL < cycle )
    29863153printk("\n[%s] thread[%x,%x] created dentry <.> (%x,%x) / cycle %d\n",
    2987 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );
     3154__FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle );
    29883155#endif
    29893156
     
    29963163    if( error )
    29973164    {
    2998         printk("\n[ERROR] in %s : cannot register dentry <.> in xhtab\n",
    2999         __FUNCTION__ );
     3165
     3166#if DEBUG_VFS_ERROR
     3167printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in xhtab / cycle %d\n",
     3168__FUNCTION__ , process->pid, this->trdid, cycle );
     3169#endif
    30003170        return -1;
    30013171    }
     
    30093179if( DEBUG_VFS_ADD_SPECIAL < cycle )
    30103180printk("\n[%s] thread[%x,%x] linked dentry <.> to parent and child inodes / cycle %d\n",
    3011 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3181__FUNCTION__, process->pid, this->trdid, cycle );
    30123182#endif
    30133183
     
    30203190        if( error )
    30213191        {
    3022             printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n",
    3023             __FUNCTION__ );
     3192
     3193#if DEBUG_VFS_ERROR
     3194printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in mapper / cycle %d\n",
     3195__FUNCTION__ , process->pid, this->trdid, cycle );
     3196#endif
    30243197            return -1;
    30253198        }
     
    30293202if( DEBUG_VFS_ADD_SPECIAL < cycle )
    30303203printk("\n[%s] thread[%x,%x] registered dentry <.> in child mapper / cycle %d\n",
    3031 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3204__FUNCTION__, process->pid, this->trdid, cycle );
    30323205#endif
    30333206
     
    30413214    if( error )
    30423215    {
    3043         printk("\n[ERROR] in %s : cannot create dentry <..> in cluster %x\n",
    3044         __FUNCTION__ , child_cxy );
     3216
     3217#if DEBUG_VFS_ERROR
     3218printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <..> in cluster %x / cycle %d\n",
     3219__FUNCTION__ , process->pid, this->trdid, child_cxy, cycle );
     3220#endif
    30453221        return -1;
    30463222    }
     
    30533229if( DEBUG_VFS_ADD_SPECIAL < cycle )
    30543230printk("\n[%s] thread[%x,%x] created dentry <..> (%x,%x) / cycle %d\n",
    3055 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );
     3231__FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle );
    30563232#endif
    30573233
     
    30593235    children_xhtab_xp = XPTR( child_cxy , &child_ptr->children );
    30603236    children_entry_xp = XPTR( child_cxy , &dentry_ptr->children );
     3237
    30613238    error = xhtab_insert( children_xhtab_xp , ".." , children_entry_xp );
     3239
    30623240    if( error )
    30633241    {
    3064         printk("\n[ERROR] in %s : cannot register dentry <..> in xhtab\n",
    3065         __FUNCTION__ );
     3242
     3243#if DEBUG_VFS_ERROR
     3244printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in xhtab / cycle %d\n",
     3245__FUNCTION__ , process->pid, this->trdid, cycle );
     3246#endif
    30663247        return -1;
    30673248    }
     
    30773258if( DEBUG_VFS_ADD_SPECIAL < cycle )
    30783259printk("\n[%s] thread[%x,%x] linked dentry <..> to parent and child inodes / cycle %d\n",
    3079 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3260__FUNCTION__, process->pid, this->trdid, cycle );
    30803261#endif
    30813262
     
    30883269        if( error )
    30893270        {
    3090             printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n",
    3091             __FUNCTION__ );
     3271
     3272#if DEBUG_VFS_ERROR
     3273printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in mapper / cycle %d\n",
     3274__FUNCTION__ , process->pid, this->trdid, cycle );
     3275#endif
    30923276            return -1;
    30933277        }
     
    30973281if( DEBUG_VFS_ADD_SPECIAL < cycle )
    30983282printk("\n[%s] thread[%x,%x] registered dentry <..> in child mapper / cycle %d\n",
    3099 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3283__FUNCTION__, process->pid, this->trdid, cycle );
    31003284#endif
    31013285
     
    31063290if( DEBUG_VFS_ADD_SPECIAL < cycle )
    31073291printk("\n[%s] thread[%x,%x] exit for child <%s> in parent <%s> / cycle %d\n",
    3108 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );
     3292__FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle );
    31093293#endif
    31103294
     
    31393323
    31403324#if DEBUG_VFS_GET_PATH
    3141 uint32_t cycle = (uint32_t)hal_get_cycles();
     3325uint32_t    cycle   = (uint32_t)hal_get_cycles();
     3326#endif
     3327
     3328#if DEBUG_VFS_GET_PATH
    31423329if( DEBUG_VFS_GET_PATH < cycle )
    31433330printk("\n[%s] thread[%x,%x] enter : inode (%x,%x) / cycle %d\n",
     
    32963483
    32973484#if DEBUG_VFS_ERROR
    3298 if( DEBUG_VFS_ERROR < cycle )
    3299 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x\n",
    3300 __FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy );
     3485printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x / cycle %d\n",
     3486__FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy, cycle );
    33013487#endif
    33023488        return -1;
     
    33303516
    33313517#if DEBUG_VFS_ERROR
    3332 if( DEBUG_VFS_ERROR < cycle )
    3333 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x\n",
    3334 __FUNCTION__ , this->process->pid , this->trdid , child_cxy );
     3518printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x / cycle %d\n",
     3519__FUNCTION__ , this->process->pid , this->trdid , child_cxy, cycle );
    33353520#endif
    33363521 
     
    34283613
    34293614#if DEBUG_VFS_REMOVE_CHILD
    3430 if( DEBUG_VFS_REMOVE_CHILD < cycle )
    34313615printk("\n[%s] thread[%x,%x] enter for dentry[%x,%x] / inode[%x,%x] / cycle %d\n",
    34323616__FUNCTION__, this->process->pid, this->trdid,
     
    34413625    if( error )
    34423626    {
    3443         printk("\n[WARNING] in %s] thread[%x,%x] cannot remove dentry %s from parent dir\n",
    3444         __FUNCTION__, this->process->pid, this->trdid, dentry_name );
    3445     }
    3446 
    3447 #if DEBUG_VFS_REMOVE_CHILD
    3448 cycle = (uint32_t)hal_get_cycles();
     3627        printk("\n[WARNING] in %s : thread[%x,%x] cannot remove dentry <%s> from parent\n",
     3628        __FUNCTION__ , this->process->pid , this->trdid , dentry_name );
     3629    }
     3630
     3631#if(DEBUG_VFS_REMOVE_CHILD & 1)
    34493632if( DEBUG_VFS_REMOVE_CHILD < cycle )
    3450 printk("\n[%s] thread[%x,%x] removed dentry from parent inode / cycle %d\n",
    3451 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3633printk("\n[%s] thread[%x,%x] removed dentry from parent inode\n",
     3634__FUNCTION__, this->process->pid, this->trdid );
    34523635#endif
    34533636
     
    34583641    links = hal_remote_atomic_add( XPTR( child_cxy , &child_inode_ptr->links ) , -1 );
    34593642
    3460 #if DEBUG_VFS_REMOVE_CHILD
    3461 cycle = (uint32_t)hal_get_cycles();
     3643#if(DEBUG_VFS_REMOVE_CHILD & 1)
    34623644if( DEBUG_VFS_REMOVE_CHILD < cycle )
    3463 printk("\n[%s] thread[%x,%x] removed dentry from child inode / cycle %d\n",
    3464 __FUNCTION__, this->process->pid, this->trdid, cycle );
     3645printk("\n[%s] thread[%x,%x] removed dentry from child inode\n",
     3646__FUNCTION__, this->process->pid, this->trdid );
    34653647#endif
    34663648
     
    37233905assert( __FUNCTION__, (array != NULL) , "child pointer is NULL");
    37243906assert( __FUNCTION__, (detailed == false) , "detailed argument not supported\n");
    3725 
    3726     // check inode type
    3727     if( inode->type != FILE_TYPE_DIR )
    3728     {
    3729         printk("\n[ERROR] in %s : target inode is not a directory\n",
    3730         __FUNCTION__ );
    3731         return -1;
    3732     }
     3907assert( __FUNCTION__, (inode->type == FILE_TYPE_DIR), "inode is not a directory\n");
    37333908
    37343909    // get parent inode FS type
  • trunk/kernel/fs/vfs.h

    r673 r683  
    168168 *****************************************************************************************/
    169169
    170 /* this enum define the VFS inode types values                                           */
      170/* this enum defines the VFS file types                                                  */
    171171/* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file       */
    172172/*           and with types in <shared_dirent.h> file.                                   */
     
    174174typedef enum   
    175175{
    176     FILE_TYPE_REG   =     0,           /*! regular file                                 */
    177     FILE_TYPE_DIR   =     1,           /*! directory                                    */
    178     FILE_TYPE_FIFO  =     2,           /*! POSIX named fifo                             */
    179     FILE_TYPE_PIPE  =     3,           /*! POSIX anonymous pipe                         */
    180     FILE_TYPE_SOCK  =     4,           /*! POSIX anonymous socket                       */
    181     FILE_TYPE_DEV   =     5,           /*! character device                             */
    182     FILE_TYPE_BLK   =     6,           /*! block device                                 */
    183     FILE_TYPE_SYML  =     7,           /*! symbolic link                                */
     176    FILE_TYPE_REG   =     0,           /*! regular file                                  */
     177    FILE_TYPE_DIR   =     1,           /*! directory                                     */
     178    FILE_TYPE_FIFO  =     2,           /*! POSIX named fifo                              */
     179    FILE_TYPE_PIPE  =     3,           /*! POSIX anonymous pipe                          */
     180    FILE_TYPE_SOCK  =     4,           /*! POSIX anonymous socket                        */
     181    FILE_TYPE_DEV   =     5,           /*! character device                              */
     182    FILE_TYPE_BLK   =     6,           /*! block device                                  */
     183    FILE_TYPE_SYML  =     7,           /*! symbolic link                                 */
    184184}
    185185vfs_file_type_t;
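
    The kernel debug messages shown earlier rely on a helper (vfs_inode_type_str()) that maps
    these vfs_file_type_t values to printable names. The fragment below is only an illustrative
    sketch of such a mapping, written against the enum as defined above; it is not the kernel
    implementation, and the helper name file_type_str() is hypothetical.

    // Illustrative sketch only : maps the vfs_file_type_t values defined above
    // to printable names, in the spirit of the vfs_inode_type_str() helper used
    // by the kernel debug messages. The function name is hypothetical.
    static const char * file_type_str( vfs_file_type_t type )
    {
        switch( type )
        {
            case FILE_TYPE_REG  : return "REG";     // regular file
            case FILE_TYPE_DIR  : return "DIR";     // directory
            case FILE_TYPE_FIFO : return "FIFO";    // POSIX named fifo
            case FILE_TYPE_PIPE : return "PIPE";    // POSIX anonymous pipe
            case FILE_TYPE_SOCK : return "SOCK";    // POSIX anonymous socket
            case FILE_TYPE_DEV  : return "DEV";     // character device
            case FILE_TYPE_BLK  : return "BLK";     // block device
            case FILE_TYPE_SYML : return "SYML";    // symbolic link
            default             : return "UNDEFINED";
        }
    }
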
     
    200200        struct vfs_ctx_s      * ctx;        /*! local pointer on FS context.                 */
    201201        vfs_file_attr_t         attr;       /*! file attributes bit vector (see above)       */
    202         vfs_file_type_t        type;       /*! same type as inode                           */
     202        vfs_file_type_t         type;       /*! same type as inode                           */
    203203        uint32_t                offset;     /*! seek position in file                        */
    204204        remote_rwlock_t         lock;       /*! protect offset modifications                 */
     
    285285    uint32_t              inum;          /*! inode identifier (unique in file system)    */
    286286    uint32_t              attr;          /*! inode attributes (see above)                */
    287     vfs_file_type_t      type;          /*! inode type (see above)                      */
     287    vfs_file_type_t       type;          /*! inode type (see vfs_file_t)                 */
    288288    uint32_t              size;          /*! number of bytes                             */
    289289    uint32_t              uid;           /*! user owner identifier                       */
     
    829829/******************************************************************************************
    830830 * This function returns, in the structure pointed by the <st> pointer, various
    831  * informations on the inode identified by the <root_inode_xp> and <patname> arguments.
      831 * information on the file identified by the <root_inode_xp> and <pathname> arguments.
    832832 *
    833833 * TODO : only partially implemented yet (only size and inum fields).
  • trunk/kernel/kern/alarm.c

    r669 r683  
    3131
    3232////////////////////////////////////////////////////////////////////////////////////////////
    33 // This static function registers the alarm identified ny the <new_alarm> argument
     33// This static function registers the alarm identified by the <alarm> & <cxy> arguments
    3434// in the list of alarms rooted in the core identified by the <core> argument.
     3636// When the existing list of alarms is not empty, it scans the list to insert the new
    3636// alarm in the right place to respect the absolute dates ordering.
    3737////////////////////////////////////////////////////////////////////////////////////////////
    38 // @ new_alarm  : local pointer on the new alarm.
    39 // @ core       : local pointer on the target core.
     38// @ cxy    : cluster containing both the new alarm and the core.
     39// @ alarm  : local pointer on the alarm.
     40// @ core   : local pointer on the core.
    4041////////////////////////////////////////////////////////////////////////////////////////////
    41 static void alarm_register( alarm_t * new_alarm,
     42static void alarm_register( cxy_t     cxy,
     43                            alarm_t * alarm,
    4244                            core_t  * core )
    4345{
    44     list_entry_t * current;          // pointer on current list_entry in existing list
    45     list_entry_t * previous;         // pointer on previous list_entry in existing list
    46     alarm_t      * current_alarm;    // pointer on current alarm in existing list
    47     cycle_t        current_date;     // date of current alarm in existing list
    48 
    49     bool_t         done = false;
    50 
    51     // get pointers on root of alarms and lock
     46    // get alarm date
     47    cycle_t new_date = hal_remote_l64( XPTR( cxy , &alarm->date ) );
     48
     49    // build local pointer on root of alarms list
    5250    list_entry_t * root = &core->alarms_root;
    53     busylock_t   * lock = &core->alarms_lock;
    54 
    55     // get pointer on new_alarm list_entry
    56     list_entry_t * new_entry = &new_alarm->list;
    57 
    58     // get new_alarm date
    59     cycle_t        new_date = new_alarm->date;
    60 
    61     // take the lock
    62     busylock_acquire( lock );
     51
     52    // build local pointer on new alarm list_entry
     53    list_entry_t * new  = &alarm->list;
    6354
    6455    // insert new alarm to respect dates order
    65     if( list_is_empty( root ) )                     // list empty
     56    if( list_remote_is_empty( cxy , &core->alarms_root ) )  // list empty
    6657    {
    67         list_add_first( root , new_entry );
     58        list_remote_add_first( cxy , root , new );
    6859    }
    69     else                                            // list non empty
     60    else                                                   // list non empty
    7061    {
    71         for( current = root->next ;
    72              (current != root) && (done == false) ;
    73              current = current->next )
     62        list_entry_t * iter;        // local pointer on current list_entry in existing list
     63        alarm_t      * iter_alarm;  // local pointer on current alarm in existing list
     64        cycle_t        iter_date;   // date of current alarm in existing list
     65        bool_t         done = false;
     66
     67        for( iter = hal_remote_lpt( XPTR( cxy , &root->next ) ) ;
     68             (iter != root) && (done == false) ;
     69             iter = hal_remote_lpt( XPTR( cxy , &iter->next ) ) )
    7470        {
    75             // get pointer on previous entry in existing list
    76             previous = current->pred;
    77 
    78             // get pointer on current alarm
    79             current_alarm = LIST_ELEMENT( current , alarm_t , list );
     71            // get local pointer on pred and next for iter
     72            list_entry_t * prev = hal_remote_lpt( XPTR( cxy , &iter->pred ) );
     73
     74            // get local pointer on current alarm
     75            iter_alarm = LIST_ELEMENT( iter , alarm_t , list );
    8076
    8177            // get date for current alarm
    82             current_date  = current_alarm->date;
    83 
    84             if( current_date > new_date )  // insert new alarm just before current
     78            iter_date = hal_remote_l64( XPTR( cxy , &iter_alarm->date ) );
     79
     80            // insert new alarm just before current when required
     81            if( iter_date > new_date ) 
    8582            {
    86                 new_entry->next = current;
    87                 new_entry->pred = previous;
    88 
    89                 current->pred  = new_entry;
    90                 previous->next = new_entry;
     83                hal_remote_spt( XPTR( cxy , &new->next ) , iter );
     84                hal_remote_spt( XPTR( cxy , &new->pred ) , prev );
     85
     86                hal_remote_spt( XPTR( cxy , &iter->pred ) , new );
     87                hal_remote_spt( XPTR( cxy , &prev->next ) , new );
    9188               
    9289                done = true;
     
    9693        if( done == false ) // new_date is larger than all registered dates
    9794        {
    98             list_add_last( root , new_entry );
     95            list_remote_add_last( cxy, root , new );
    9996        }
    10097    }
    101 
     98}  // end alarm_register()
     99           
     100
     101///////////////////////////////////
     102void alarm_init( alarm_t *  alarm )
     103{
     104    alarm->linked   = false;
     105    list_entry_init( &alarm->list );
     106}
     107
     108///////////////////////////////////////
     109void alarm_start( xptr_t     thread_xp,
     110                  cycle_t    date,
     111                  void     * func_ptr,
     112                  xptr_t     args_xp )
     113{
     114    // get cluster and local pointer on target thread
     115    thread_t * tgt_ptr = GET_PTR( thread_xp );
     116    cxy_t      tgt_cxy = GET_CXY( thread_xp );
     117
     118// check alarm state
     119assert( __FUNCTION__ , (hal_remote_l32( XPTR(tgt_cxy,&tgt_ptr->alarm.linked)) == false ),
     120"alarm already started");
     121
     122    // get local pointer on core running target thread
     123    core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) );
     124
     125    // build extended pointer on lock protecting alarms list
     126    xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock );
     127 
     128    // initialize alarm descriptor
     129    hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date     ) , date );
     130    hal_remote_spt( XPTR( tgt_cxy , &tgt_ptr->alarm.func_ptr ) , func_ptr );
     131    hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.args_xp  ) , args_xp );
     132    hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked   ) , true );
     133
     134    // take the lock
     135    remote_busylock_acquire( lock_xp );
     136
     137    // register alarm in core list
     138    alarm_register( tgt_cxy , &tgt_ptr->alarm , core );
     139
     140    //release the lock
     141    remote_busylock_release( lock_xp );
     142
     143}  // end alarm_start()
     144
     145
     146/////////////////////////////////////
     147void alarm_stop( xptr_t   thread_xp )
     148{
     149    // get cluster and local pointer on target thread
     150    thread_t * tgt_ptr = GET_PTR( thread_xp );
     151    cxy_t      tgt_cxy = GET_CXY( thread_xp );
     152
     153    // get local pointer on core running target thread
     154    core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) );
     155
     156    // build extended pointer on lock protecting alarms list
     157    xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock );
     158 
     159    // take the lock
     160    remote_busylock_acquire( lock_xp );
     161
     162    // unlink the alarm from the list rooted in core
     163    list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list );
     164
     165    // update alarm state
     166    hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , false );
     167
     168    //release the lock
     169    remote_busylock_release( lock_xp );
     170
     171}  // end alarm_stop()
     172
     173
     174//////////////////////////////////////
     175void alarm_update( xptr_t   thread_xp,
     176                   cycle_t  new_date )
     177{
     178    // get cluster and local pointer on target thread
     179    thread_t * tgt_ptr = GET_PTR( thread_xp );
     180    cxy_t      tgt_cxy = GET_CXY( thread_xp );
     181
     182    // get local pointer on core running target thread
     183    core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) );
     184
     185    // build extended pointer on lock protecting alarms list
     186    xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock );
     187 
     188    // take the lock
     189    remote_busylock_acquire( lock_xp );
     190
     191    // unlink the alarm from the core list
     192    list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list );
     193
     194    // update the alarm date and state
     195    hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date   ) , new_date );
     196    hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , true );
     197
     198    // register alarm in core list
     199    alarm_register( tgt_cxy , &tgt_ptr->alarm , core );
     200   
    102201    // release the lock
    103     busylock_release( lock );
    104 
    105 }  // end alarm_register()
    106 
    107 //////////////////////////////////////
    108 void alarm_start( cycle_t    date,
    109                   void     * func_ptr,
    110                   xptr_t     args_xp,
    111                   thread_t * thread )
    112 {
    113     // get pointer on alarm
    114     alarm_t * alarm = &thread->alarm;
    115 
    116     // initialize alarm descriptor
    117     alarm->date     = date;
    118     alarm->func_ptr = func_ptr;
    119     alarm->args_xp = args_xp;
    120    
    121     // register alarm in core list
    122     alarm_register( alarm , thread->core );
    123 
    124 }  // end alarm_start()
    125 
    126 /////////////////////////////////////
    127 void alarm_update( thread_t * thread,
    128                    cycle_t    new_date )
    129 {
    130     // get pointer on alarm
    131     alarm_t * alarm = &thread->alarm;
    132 
    133     // get pointer on core
    134     core_t   * core = thread->core;
    135 
    136     // get pointer on lock protecting the alarms list
    137     busylock_t   * lock = &core->alarms_lock;
    138 
    139     // unlink the alarm from the core list
    140     busylock_acquire( lock );
    141     list_unlink( &alarm->list );
    142     busylock_release( lock );
    143 
    144     // update the alarm date
    145     alarm->date = new_date;
    146 
    147     // register alarm in core list
    148     alarm_register( alarm , core );
    149    
     202    remote_busylock_release( lock_xp );
     203
    150204}  // end alarm_update()
    151205
    152 ////////////////////////////////////
    153 void alarm_stop( thread_t * thread )
    154 {
    155     // get pointer on alarm
    156     alarm_t * alarm = &thread->alarm;
    157 
    158     // get pointer on core
    159     core_t * core = thread->core;
    160 
    161     // get pointer on lock protecting the alarms list
    162     busylock_t * lock = &core->alarms_lock;
    163 
    164     // unlink the alarm from the list rooted in core
    165     busylock_acquire( lock );
    166     list_unlink( &alarm->list );
    167     busylock_release( lock );
    168 
    169 }  // end alarm_stop()
    170 
     206
  • trunk/kernel/kern/alarm.h

    r669 r683  
    3636 *   This structure defines a generic, timer based, kernel alarm.
    3737 *
    38  * - An alarm being attached to a given thread, the alarm descriptor is embedded in the
     38 * - An alarm is attached to a given thread, and the alarm descriptor is embedded in the
    3939 *   thread descriptor. A client thread can use the alarm_start() function to dynamically
     4040 *   activate the alarm. It can use the alarm_stop() function to deactivate this alarm.
    4141 * - This kernel alarm is generic, as the alarm handler (executed when the alarm rings),
    42  *   and the handler arguments are defined by two pointers <func_ptr> and <args_xp>.
     42 *   and the handler arguments are defined by two pointers: <func_ptr> and <args_xp>.
    4343 * - When an alarm is created by a client thread, it is registered in the list of alarms
    4444 *   rooted in the core running the client thread. When it is stopped, the alarm is simply
    4545 *   removed from this list.
    46  * - When creating an alarm, the client thread must define an absolute date (in cycles),
    47  *   the func_ptr local pointer, and the args_xp extended pointer.
     46 * - When creating an alarm with the alarm_start() function, the client thread must define
     47 *   an absolute date (in cycles), the func_ptr pointer, and the args_xp extended pointer.
    4848 * - The list of alarms is ordered by increasing dates. At each TICK received by a core,
    4949 *   the date of the first registered alarm is compared to the current date (in the
    5050 *   core_clock() function). The alarm handler is executed when current_date >= alarm_date.
    51  * - It is the handler responsability to stop a ringing alarm, or update the date. 
      51 * - It is the handler responsibility to stop and delete a ringing alarm using the
     52 *   alarm_stop() function, or update the alarm date using the alarm_update() function. 
     53 * - The three alarm_start(), alarm_stop(), and alarm_update() access functions use
     54 *   the lock protecting the alarms list to handle concurrent accesses. These functions
     55 *   use extended pointers to access the alarm list, and can be called by a thread
     56 *   running in any cluster.
    5257 *
    53  * This mechanism is used bi the almos_mkh implementation of the TCP protocoL.
     58 * This embedded alarm mechanism is used by:
     59 * 1. the socket_accept(), socket_connect(), socket_send(), socket_close() functions,
      60 *    to implement the TCP retransmission mechanism.
     61 * 2. the sys_thread_sleep() function, to implement the "sleep" mechanism.
    5462 ******************************************************************************************/
    5563
    5664typedef struct alarm_s
    5765{
     66    bool_t         linked;         /*! active when true (i.e linked to the core list)     */
    5867    cycle_t        date;           /*! absolute date for handler execution                */
    5968    void         * func_ptr;       /*! local pointer on alarm handler function            */
     6069    xptr_t         args_xp;        /*! extended pointer on handler arguments              */
    61     list_entry_t   list;           /*! all alarms attached to the same core               */
     70    list_entry_t   list;           /*! set of active alarms attached to the same core     */
    6271}
    6372alarm_t;
     
    7079
    7180/*******************************************************************************************
     81 * This function initialises the alarm state to "inactive".
     82 *******************************************************************************************
     83 * @ alarm     : local pointer on alarm.
     84 ******************************************************************************************/
     85void alarm_init( alarm_t *  alarm );
     86
     87/*******************************************************************************************
    7288 * This function initializes the alarm descriptor embedded in the thread identified by the
    73  * <thread> argument from the <date>, <func_ptr>, <args_ptr> arguments, and registers it
    74  * in the ordered list rooted in the core running this <thread>.
      89 * <thread_xp> argument from the <date>, <func_ptr>, <args_xp> arguments, and registers
     90 * this alarm in the ordered list rooted in the core running this thread.
     91 * It takes the lock protecting the alarms list against concurrent accesses.
    7592 *******************************************************************************************
     93 * @ thread_xp  : extended pointer on the target thread.
    7694 * @ date       : absolute date (in cycles).
    7795 * @ func_ptr   : local pointer on the handler to execute when the alarm rings.
    7896 * @ args_xp    : extended pointer on the handler arguments.
    79  * @ thread     : local pointer on the client thread.
    8097 ******************************************************************************************/
    81 void alarm_start( cycle_t           date,
    82                   void            * func_ptr,
    83                   xptr_t            args_xp,
    84                   struct thread_s * thread );
     98void alarm_start( xptr_t    thread_xp,
     99                  cycle_t   date,
     100                  void    * func_ptr,
     101                  xptr_t    args_xp );
    85102
    86103/*******************************************************************************************
     
    88105 * <thread_xp> argument. The list of alarms rooted in the core running the client thread
    89106 * is modified to respect the absolute dates ordering.
     107 * It takes the lock protecting the alarms list against concurrent accesses.
    90108 *******************************************************************************************
    91  * @ thread     : local pointer on the client thread.
     109 * @ thread_xp  : extended pointer on the target thread.
    92110 * @ new_date   : absolute new date (in cycles).
    93111 ******************************************************************************************/
    94 void alarm_update( struct thread_s * thread,
    95                    cycle_t           new_date );
     112void alarm_update( xptr_t     thread_xp,
     113                   cycle_t    new_date );
    96114
    97115/*******************************************************************************************
    98116 * This function unlinks the alarm identified by the <thread_xp> argument from the list of
    99117 * alarms rooted in the core descriptor.
     118 * It takes the lock protecting the alarms list against concurrent accesses.
    100119 *******************************************************************************************
    101  * @ thread     : local pointer on the client thread.
     120 * @ thread_xp  : extended pointer on the target thread.
    102121 ******************************************************************************************/
    103 void alarm_stop( struct thread_s * thread );
     122void alarm_stop( xptr_t    thread_xp );
    104123
    105124
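A minimal usage sketch of the alarm API declared above (illustration only, not part of the changeset: the my_timeout_handler() and arm_timeout() names are hypothetical; only the alarm_start() / alarm_stop() prototypes and the handler signature called by core_check_alarms() come from the sources):

    // hypothetical handler: args_xp is assumed to be the extended pointer on the
    // client thread; the handler must stop (or re-arm) the ringing alarm itself
    static void my_timeout_handler( xptr_t args_xp )
    {
        alarm_stop( args_xp );          // unlink the alarm from the core list
        // ... handle the timeout (e.g. unblock the client thread) ...
    }

    // hypothetical helper executed by the client thread to arm its embedded alarm
    static void arm_timeout( cycle_t delay )
    {
        xptr_t client_xp = XPTR( local_cxy , CURRENT_THREAD );

        alarm_start( client_xp,                    // thread owning the alarm
                     hal_get_cycles() + delay,     // absolute date (in cycles)
                     &my_timeout_handler,          // func_ptr
                     client_xp );                  // args_xp passed to the handler
    }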
  • trunk/kernel/kern/chdev.c

    r669 r683  
    8787{
    8888    chdev_t    * chdev;
    89     kmem_req_t   req;
    9089
    9190    // allocate memory for chdev
    92     req.type   = KMEM_KCM;
    93     req.order  = bits_log2( sizeof(chdev_t) );
    94     req.flags  = AF_ZERO | AF_KERNEL;
    95     chdev      = kmem_alloc( &req );
     91    chdev = kmem_alloc( bits_log2(sizeof(chdev_t)) , AF_ZERO | AF_KERNEL );
    9692
    9793    if( chdev == NULL ) return NULL;
     
    114110}  // end chdev_create()
    115111
    116 ///////////////////////////////////
    117 void chdev_print( chdev_t * chdev )
    118 {
    119     printk("\n - func      = %s"
    120            "\n - channel   = %d"
    121            "\n - base      = %l"
     112/////////////////////////////////////
     113void chdev_display( xptr_t chdev_xp )
     114{
     115    chdev_t * chdev = GET_PTR( chdev_xp );
     116    cxy_t     cxy   = GET_CXY( chdev_xp );
     117
     118    char      name[16];
     119
     120    hal_remote_memcpy( XPTR( local_cxy, name ),
     121                       XPTR( cxy , &chdev->name ), 16 );
     122
     123    printk("\n - chdev     = [%x,%x]"
     124           "\n - name      = %s"
     125           "\n - base      = [%x,%x]"
    122126           "\n - cmd       = %x"
    123            "\n - isr       = %x"
    124            "\n - chdev     = %x\n",
    125            chdev_func_str(chdev->func),
    126            chdev->channel,
    127            chdev->base,
    128            chdev->cmd,
    129            chdev->isr,
    130            chdev );
    131 }
     127           "\n - isr       = %x\n",
     128           cxy,
     129           chdev,
     130           name,
     131           GET_CXY( hal_remote_l64( XPTR( cxy , &chdev->base ))),
     132           GET_PTR( hal_remote_l64( XPTR( cxy , &chdev->base ))),
     133           hal_remote_lpt( XPTR( cxy , &chdev->cmd )),
     134           hal_remote_lpt( XPTR( cxy , &chdev->isr )) );
     135
     136}  // end chdev_display()
    132137
    133138//////////////////////////////////////////////////
     
    450455    chdev_t     * chdev_ptr;
    451456
    452     assert( __FUNCTION__, (file_xp != XPTR_NULL) ,
    453     "file_xp == XPTR_NULL\n" );
     457assert( __FUNCTION__, (file_xp != XPTR_NULL) ,
     458"file_xp == XPTR_NULL" );
    454459
    455460    // get cluster and local pointer on remote file descriptor
     
    462467    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
    463468
    464     assert( __FUNCTION__, (inode_type == FILE_TYPE_DEV) ,
    465     "inode type %d is not FILE_TYPE_DEV\n", inode_type );
     469assert( __FUNCTION__, (inode_type == FILE_TYPE_DEV) ,
     470"inode type %d is not FILE_TYPE_DEV", inode_type );
    466471
    467472    // get chdev local pointer from inode extension
  • trunk/kernel/kern/chdev.h

    r669 r683  
    121121 *      . This busylock is also used to protect direct access to the shared
    122122 *        kernel TXT0 terminal, that does not use the waiting queue.
    123  *      . For mostd chdevs, the client waiting queue is an xlist of threads, but it is
     123 *      . For most chdevs, the client waiting queue is a list of threads, but it is
    124124 *        a list of sockets for the NIC chdevs. It is unused for ICU, PIC, and IOB.
    125125 *****************************************************************************************/
     
    190190
    191191/****************************************************************************************
    192  * This function display relevant values for a chdev descriptor.
    193  ****************************************************************************************
    194  * @ chdev   : pointer on chdev.
    195  ***************************************************************************************/
    196 void chdev_print( chdev_t * chdev );
    197 
    198 /****************************************************************************************
    200193 * This function returns a printable string for a device functional type.
    200193 ****************************************************************************************
     
    223216
    224217/****************************************************************************************
    225  * This generid function is executed by an user thread requesting an IOC or TXT chdev
     218 * This generic function is executed by a user thread requesting an IOC or TXT chdev
    226219 * service. It registers the calling thread in the waiting queue of the remote
    227220 * chdev descriptor identified by the <chdev_xp> argument.
     
    282275
    283276/****************************************************************************************
     277 * This function displays relevant values for a remote chdev descriptor.
     278 ****************************************************************************************
     279 * @ chdev_xp   : extended pointer on chdev.
     280 ***************************************************************************************/
     281void chdev_display( xptr_t chdev_xp );
     282
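For illustration only (target_cxy and chdev_ptr are hypothetical variables), the new prototype is typically called with an extended pointer built from the cluster identifier and the local pointer:

    // display a chdev located in cluster <target_cxy>
    chdev_display( XPTR( target_cxy , chdev_ptr ) );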
     283/****************************************************************************************
    284284 * This function displays the local copy of the external chdevs directory.
    285285 * (global variable replicated in all clusters)
  • trunk/kernel/kern/cluster.c

    r669 r683  
    22 * cluster.c - Cluster-Manager related operations
    33 *
    4  * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
     4 * Author  Ghassan Almaless       (2008,2009,2010,2011,2012)
    55 *         Mohamed Lamine Karaoui (2015)
    6  *         Alain Greiner (2016,2017,2018,2019,2020)
     6 *         Alain Greiner          (2016,2017,2018,2019,2020)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    7474        cluster->y_size          = info->y_size;
    7575        cluster->io_cxy          = info->io_cxy;
     76        cluster->sys_clk         = info->sys_clk;
    7677
    7778    // initialize the cluster_info[][] array
     
    177178printk("\n[%s] PPM initialized in cluster %x / cycle %d\n",
    178179__FUNCTION__ , local_cxy , cycle );
    179 #endif
    180 
    181     // initialises embedded KHM
    182         khm_init( &cluster->khm );
    183 
    184 #if( DEBUG_CLUSTER_INIT & 1 )
    185 cycle = (uint32_t)hal_get_cycles();
    186 if( DEBUG_CLUSTER_INIT < cycle )
    187 printk("\n[%s] KHM initialized in cluster %x at cycle %d\n",
    188 __FUNCTION__ , local_cxy , hal_get_cycles() );
    189180#endif
    190181
  • trunk/kernel/kern/cluster.h

    r657 r683  
    22 * cluster.h - Cluster-Manager definition
    33 *
    4  * authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     4 * authors  Ghassan Almaless       (2008,2009,2010,2011,2012)
    55 *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016,2017,2018,2019,2019,2020)
     6 *          Alain Greiner          (2016,2017,2018,2019,2020)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    3939#include <ppm.h>
    4040#include <kcm.h>
    41 #include <khm.h>
    4241#include <rpc.h>
    4342#include <core.h>
     
    105104    uint32_t        x_size;            /*! number of clusters in a row    (can be 1)      */
    106105    uint32_t        y_size;            /*! number of clusters in a column (can be 1)      */
    107     cxy_t           io_cxy;            /*! io cluster identifier                          */
     106    uint32_t        io_cxy;            /*! io cluster identifier                          */
     107    uint32_t        sys_clk;           /*! system_clock frequency (in Hertz)              */
    108108    uint32_t        dqdt_root_level;   /*! index of root node in dqdt_tbl[]               */
    109109    uint32_t        nb_txt_channels;   /*! number of TXT channels                         */
     
    124124    list_entry_t    dev_root;          /*! root of list of devices in cluster             */
    125125
    126     // memory allocators
    127     ppm_t           ppm;               /*! embedded kernel page manager                   */
    128     khm_t           khm;               /*! embedded kernel heap manager                   */
    129     kcm_t           kcm[6];            /*! embedded kernel cache managers [6:11]          */
     126    // physical memory allocators: one PPM and several KCMs
     127    ppm_t           ppm;
     128    kcm_t           kcm[CONFIG_PPM_PAGE_ORDER - CONFIG_CACHE_LINE_ORDER];
    130129
    131130    // RPC
  • trunk/kernel/kern/core.c

    r669 r683  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017,2018)
     5 *         Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4747        core->ticks_nr          = 0;
    4848        core->usage             = 0;
    49         core->spurious_irqs     = 0;
    5049        core->fpu_owner         = NULL;
    5150        core->rand_last         = hal_time_stamp() & 0xFFF;
     
    5554
    5655    // initialise the alarms lock
    57     busylock_init( &core->alarms_lock , LOCK_CORE_ALARMS );
     56    remote_busylock_init( XPTR( local_cxy , &core->alarms_lock ) , LOCK_CORE_ALARMS );
    5857
    5958    // initialise the alarms list
     
    6160}
    6261
    63 ///////////////////////////////////////
    64 void core_check_alarms( core_t * core )
     62////////////////////////////////////////////////////////////////////////////////////
     63// This static function checks the alarms registered in the core, and calls the
     64// relevant alarm handler for all alarms whose deadline has elapsed.
     65// It does not take the lock protecting the alarm list, because it only accesses
     66// the first alarm in the list, and all modifications in the list are done by
     67// the low-level access functions called by the handler(s).
     68////////////////////////////////////////////////////////////////////////////////////
     69static void core_check_alarms( core_t * core )
    6570{
    6671    alarm_handler_t * handler;
     
    7277    if( list_is_empty( root ) ) return;
    7378
    74     // get pointer on first alarm when list non empty
    75     alarm_t * alarm = LIST_FIRST( root , alarm_t , list );
    76 
    77     // get first alarm date
    78     cycle_t alarm_date = alarm->date;
    79 
    80     // get current date
    81     cycle_t current_date = hal_get_cycles();
    82 
    83     if( current_date >= alarm_date )
     79    while( list_is_empty( root ) == false )
    8480    {
    85         // get pointer on registered alarm handler
    86         handler = (alarm_handler_t *)alarm->func_ptr;
    87 
    88         // call alarm handler
    89         handler( alarm->args_xp );
     81        // get pointer on first alarm
     82        alarm_t * alarm = LIST_FIRST( root , alarm_t , list );
     83
     84        // get first alarm date
     85        cycle_t alarm_date = alarm->date;
     86
     87        // get current date
     88        cycle_t current_date = hal_get_cycles();
     89
     90        // call handler if delay elapsed, and retry
     91        if( current_date >= alarm_date )
     92        {
     93            // get pointer on registered alarm handler
     94            handler = (alarm_handler_t *)alarm->func_ptr;
     95
     96            // call alarm handler
     97            handler( alarm->args_xp );
     98        }
     99        else   // exit loop when first alarm delay not elapsed
     100        {
     101            break;
     102        }
    90103    }
    91104}   // end core_check_alarms()
     
    127140                    uint32_t * tm_us )
    128141{
    129         *tm_s  = (core->ticks_nr*CONFIG_SCHED_TICK_MS_PERIOD)/1000;
    130         *tm_us = (core->ticks_nr*CONFIG_SCHED_TICK_MS_PERIOD*1000)%1000000;
     142    // get number of cycles
     143    uint64_t cycles = core->cycles;
     144
     145    // get number of cycles per second
     146    uint32_t cycles_per_second = LOCAL_CLUSTER->sys_clk;
     147
     148    *tm_s  = cycles / cycles_per_second;
     149    *tm_us = ((cycles % cycles_per_second) * 1000000) / cycles_per_second;
    131150}
    132151
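As a worked example of this conversion (the 50 MHz clock is only an assumption for illustration): with sys_clk = 50,000,000 Hz and cycles = 125,000,000, the integer division gives tm_s = 2 seconds, and the remaining 25,000,000 cycles correspond to tm_us = (25,000,000 * 1,000,000) / 50,000,000 = 500,000 microseconds.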
     
    139158        ticks = core->ticks_nr++;
    140159
     160    // handle alarms
     161    core_check_alarms( core );
     162
    141163        // handle scheduler
    142164        if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
    143 
    144     // handle alarms
    145     core_check_alarms( core );
    146165}
    147166
  • trunk/kernel/kern/core.h

    r669 r683  
    3939
    4040/****************************************************************************************
    41  * This structure defines the core descriptor.
    42  * Besides the core identifiers (gid,lid), it contains an embedded private scheduler.
     41 * This structure defines a core descriptor.
     42 * Besides the core identifiers (gid,lid), it contains an embedded private scheduler
     43 * and a 64-bit software cycles counter.
     44 * It also contains the root of the local list of alarms, dynamically registered by the
     45 * threads running on this core. This local list is protected by a remote_busylock,
     46 * because it can be accessed by any thread, running in any cluster, using the
     47 * access functions defined in the <alarm.c> & <alarm.h> files.
    4348 * It contains an architecture specific extension to store the interrupt vector(s).
    4348 * The core_init() function must allocate memory for this extension, depending on the
     
    5156        gid_t               gid;            /*! core global identifier (hardware index)    */
    5257
     58        scheduler_t         scheduler;      /*! embedded private scheduler                 */
     59
    5360        uint64_t            cycles;         /*! total number of cycles (from hard reset)   */
    5461        uint32_t            time_stamp;     /*! previous time stamp (read from register)   */
    5562
    5663    list_entry_t        alarms_root;    /*! root of list of attached alarms            */
    57     busylock_t          alarms_lock;    /*! lock protecting the list of alarms         */
     64    remote_busylock_t   alarms_lock;    /*! lock protecting the list of alarms         */
    5865
    5966        uint32_t            ticks_nr;       /*! number of elapsed ticks                    */
    6067        uint32_t            usage;          /*! cumulated busy_percent (idle / total)      */
    61         uint32_t            spurious_irqs;  /*! for instrumentation...                     */
    6268        struct thread_s   * fpu_owner;      /*! pointer on current FPU owner thread        */
    6369    uint32_t            rand_last;      /*! last computed random value                 */
    64 
    65         scheduler_t         scheduler;      /*! embedded private scheduler                 */
    6670
    6771    void              * pic_extend;     /*! PIC implementation specific extension      */
  • trunk/kernel/kern/do_syscall.c

    r669 r683  
    22 * do_syscall.c - architecture independant entry-point for system calls.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018, 2019)
     4 * Author    Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    4343///////////////////////////////////////////////////////////////////////////////////////
    4444// This array of pointers defines the kernel functions implementing the syscalls.
    45 // It must be kept consistent with the enum in "syscalls_numbers.h" file.
     45// It must be kept consistent with the enum in <syscalls_numbers.h> file,
     46// and with the SYS_OBJs defined in the kernel <Makefile>.
    4647///////////////////////////////////////////////////////////////////////////////////////
    4748
     
    9495    sys_wait,               // 39
    9596
    96     sys_get_config,         // 40
    97     sys_get_core_id,        // 41
    98     sys_get_cycle,          // 42
    99     sys_display,            // 43
    100     sys_place_fork,         // 44
    101     sys_thread_sleep,       // 45
    102     sys_thread_wakeup,      // 46
    103     sys_trace,              // 47
    104     sys_fg,                 // 48
    105     sys_is_fg,              // 49
     97    sys_get,                // 40
     98    sys_display,            // 41
     99    sys_place_fork,         // 42
     100    sys_thread_sleep,       // 43
     101    sys_thread_wakeup,      // 44
     102    sys_trace,              // 45
     103    sys_fg,                 // 46
     104    sys_is_fg,              // 47
     105    sys_fbf,                // 48
     106    sys_undefined,          // 49   //
    106107
    107108    sys_exit,               // 50
    108109    sys_sync,               // 51
    109110    sys_fsync,              // 52
    110     sys_get_best_core,      // 53
    111     sys_get_nb_cores,       // 54
    112     sys_get_thread_info,    // 55
    113     sys_fbf,                // 56
    114     sys_socket,             // 57
     111    sys_socket,             // 53
    115112};
    116113
     
    164161    case SYS_WAIT:                         return "WAIT";             // 39
    165162
    166     case SYS_GET_CONFIG:                   return "GET_CONFIG";       // 40
    167     case SYS_GET_CORE_ID:                  return "GET_CORE_ID";      // 41
    168     case SYS_GET_CYCLE:                    return "GET_CYCLE";        // 42
    169     case SYS_DISPLAY:                      return "DISPLAY";          // 43
    170     case SYS_PLACE_FORK:                   return "PLACE_FORK";       // 44
    171     case SYS_THREAD_SLEEP:                 return "THREAD_SLEEP";     // 45
    172     case SYS_THREAD_WAKEUP:                return "THREAD_WAKEUP";    // 46
    173     case SYS_TRACE:                        return "TRACE";            // 47
    174     case SYS_FG:                           return "FG";               // 48
    175     case SYS_IS_FG:                        return "IS_FG";            // 49
     163    case SYS_GET:                          return "GET";              // 40
     164    case SYS_DISPLAY:                      return "DISPLAY";          // 41
     165    case SYS_PLACE_FORK:                   return "PLACE_FORK";       // 42
     166    case SYS_THREAD_SLEEP:                 return "THREAD_SLEEP";     // 43
     167    case SYS_THREAD_WAKEUP:                return "THREAD_WAKEUP";    // 44
     168    case SYS_TRACE:                        return "TRACE";            // 45
     169    case SYS_FG:                           return "FG";               // 46
     170    case SYS_IS_FG:                        return "IS_FG";            // 47
     171    case SYS_FBF:                          return "FBF";              // 48
    176172
    177173    case SYS_EXIT:                         return "EXIT";             // 50
    178174    case SYS_SYNC:                         return "SYNC";             // 51
    179175    case SYS_FSYNC:                        return "FSYNC";            // 52
    180     case SYS_GET_BEST_CORE:                return "GET_BEST_CORE";    // 53
    181     case SYS_GET_NB_CORES:                 return "GET_NB_CORES";     // 54
    182     case SYS_GET_THREAD_INFO:              return "GET_THREAD_INFO";  // 55
    183     case SYS_FBF:                          return "FBF";              // 56
    184     case SYS_SOCKET:                       return "SOCKET";           // 57
     176    case SYS_SOCKET:                       return "SOCKET";           // 53
    185177
    186178    default:                               return "undefined";
  • trunk/kernel/kern/kernel_init.c

    r669 r683  
    33 *
    44 * Authors :  Mohamed Lamine Karaoui (2015)
    5  *            Alain Greiner  (2016,2017,2018,2019,2020)
     5 *            Alain Greiner          (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) Sorbonne Universites
     
    4646#include <memcpy.h>
    4747#include <ppm.h>
     48#include <kcm.h>
    4849#include <page.h>
    4950#include <chdev.h>
     
    379380        if( func == DEV_FUNC_MMC ) 
    380381        {
    381 
    382             // check channels
    383             if( channels != 1 )
    384             {
    385                 printk("\n[PANIC] in %s : MMC device must be single channel\n",
    386                 __FUNCTION__ );
    387                 hal_core_sleep();
    388             }
    389 
    390382            // create chdev in local cluster
    391383            chdev_ptr = chdev_create( func,
     
    394386                                      false,      // direction
    395387                                      base );
    396 
    397             // check memory
    398388            if( chdev_ptr == NULL )
    399389            {
     
    403393            }
    404394           
     395#if (DEBUG_KERNEL_INIT & 0x1)
     396if( hal_time_stamp() > DEBUG_KERNEL_INIT )
     397printk("\n[%s] created chdev[%x,%x] for MMC\n",
     398__FUNCTION__ , local_cxy , chdev_ptr );
     399#endif
    405400            // make MMC specific initialisation
    406401            dev_mmc_init( chdev_ptr );
     
    423418#if( DEBUG_KERNEL_INIT & 0x1 )
    424419if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    425 printk("\n[%s] : created MMC in cluster %x / chdev = %x\n",
     420printk("\n[%s] initialised chdev[%x,%x] for MMC\n",
    426421__FUNCTION__ , local_cxy , chdev_ptr );
    427422#endif
     
    439434                                          false,     // direction
    440435                                          base );
    441 
    442                 // check memory
    443436                if( chdev_ptr == NULL )
    444437                {
     
    448441                }
    449442           
     443#if (DEBUG_KERNEL_INIT & 0x1)
     444if( hal_time_stamp() > DEBUG_KERNEL_INIT )
     445printk("\n[%s] cxy %x : created chdev[%x,%x] for DMA[%d]\n",
     446__FUNCTION__ , local_cxy , chdev_ptr , channel );
     447#endif
    450448                // make DMA specific initialisation
    451449                dev_dma_init( chdev_ptr );     
     
    457455#if( DEBUG_KERNEL_INIT & 0x1 )
    458456if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    459 printk("\n[%s] : created DMA[%d] in cluster %x / chdev = %x\n",
    460 __FUNCTION__ , channel , local_cxy , chdev_ptr );
     457printk("\n[%s] initialised chdev[%x,%x] for DMA[%d]\n",
     458__FUNCTION__ , local_cxy , chdev_ptr , channel );
    461459#endif
    462460            }
     
    471469// These chdev descriptors are distributed on all clusters, using a modulo on a global
    472470// index, identically computed in all clusters.
    473 // This function is executed in all clusters by the core[0] core, that computes a global index
    474 // for all external chdevs. Each core[0] core creates only the chdevs that must be placed in
    475 // the local cluster, because the global index matches the local index.
     471// This function is executed in all clusters by core[0], which computes a global index
     472// for all external chdevs. Each core[0] creates only the chdevs that must be placed
     473// in the local cluster, because the global index matches the local index.
    476474// The relevant entries in all copies of the devices directory are initialised.
    477475///////////////////////////////////////////////////////////////////////////////////////////
     
    499497    dev_tbl     = info->ext_dev;
    500498
    501     // initializes global index (PIC is already placed in cluster 0
     499    // initializes global index (PIC is already placed in cluster 0)
    502500    ext_chdev_gid = 1;
    503501
     
    529527
    530528        // check external device functionnal type
    531         if( (func != DEV_FUNC_IOB) && (func != DEV_FUNC_IOC) && (func != DEV_FUNC_TXT) &&
    532             (func != DEV_FUNC_NIC) && (func != DEV_FUNC_FBF) )
     529        if( (func != DEV_FUNC_IOB) &&
     530            (func != DEV_FUNC_IOC) &&
     531            (func != DEV_FUNC_TXT) &&
     532            (func != DEV_FUNC_NIC) &&
     533            (func != DEV_FUNC_FBF) )
    533534        {
    534535            printk("\n[PANIC] in %s : undefined peripheral type\n",
     
    537538        }
    538539
    539         // loops on channels
     540        // loop on channels
    540541        for( channel = 0 ; channel < channels ; channel++ )
    541542        {
     
    547548
    548549                // all kernel instances compute the target cluster for all chdevs,
    549                 // computing the global index ext_chdev_gid[func,channel,direction]
     550                // and the global index ext_chdev_gid[func,channel,direction]
    550551                cxy_t target_cxy;
    551552                while( 1 )
     
    568569                if( target_cxy == local_cxy )
    569570                {
    570 
    571 #if( DEBUG_KERNEL_INIT & 0x3 )
    572 if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    573 printk("\n[%s] : found chdev %s / channel = %d / rx = %d / cluster %x\n",
    574 __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy );
    575 #endif
    576571                    chdev = chdev_create( func,
    577572                                          impl,
     
    587582                    }
    588583
     584#if (DEBUG_KERNEL_INIT & 0x1)
     585if( hal_time_stamp() > DEBUG_KERNEL_INIT )
     586printk("\n[%s] created chdev[%x,%x] for %s[%d] / is_rx %d\n",
     587__FUNCTION__ , local_cxy , chdev , chdev_func_str(func) , channel , rx );
     588#endif
    589589                    // make device type specific initialisation
    590590                    if     ( func == DEV_FUNC_IOB ) dev_iob_init( chdev );
     
    621621                    }
    622622
    623 #if( DEBUG_KERNEL_INIT & 0x3 )
     623#if( DEBUG_KERNEL_INIT & 1 )
    624624if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    625 printk("\n[%s] : created chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n",
    626 __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev );
    627 #endif
     625printk("\n[%s] initialised chdev[%x,%x] for %s\n",
     626__FUNCTION__ , local_cxy, chdev , chdev->name );
     627#endif
     628
    628629                }  // end if match
    629630
     
    637638
    638639///////////////////////////////////////////////////////////////////////////////////////////
    639 // This function is called by core[0] in cluster 0 to allocate memory and initialize the PIC
     640// This function is called by core[0][0] to allocate memory and initialize the PIC
    640641// device, namely the information attached to the external IOPIC controller, that
    641642// must be replicated in all clusters (struct iopic_input).
     
    11021103    // and allocates memory for the corresponding chdev descriptors.
    11031104    if( core_lid == 0 ) internal_devices_init( info );
    1104        
    11051105
    11061106    // All core[0]s contribute to initialise external peripheral chdev descriptors.
     
    14941494                   " - core descriptor    : %d bytes\n"
    14951495                   " - scheduler          : %d bytes\n"
    1496                    " - socket             : %d bytes\n"
     1496                   " - socket descriptor  : %d bytes\n"
    14971497                   " - rpc fifo           : %d bytes\n"
    14981498                   " - page descriptor    : %d bytes\n"
     
    15011501                   " - ppm manager        : %d bytes\n"
    15021502                   " - kcm manager        : %d bytes\n"
    1503                    " - khm manager        : %d bytes\n"
    15041503                   " - vmm manager        : %d bytes\n"
    15051504                   " - vfs inode          : %d bytes\n"
     
    15291528                   sizeof( ppm_t              ),
    15301529                   sizeof( kcm_t              ),
    1531                    sizeof( khm_t              ),
    15321530                   sizeof( vmm_t              ),
    15331531                   sizeof( vfs_inode_t        ),
     
    15461544#endif
    15471545
     1546    // number of cycles per TICK (depends on the actual system clock frequency)
     1547    uint32_t cycles_per_tick = cluster->sys_clk / CONFIG_SCHED_TICKS_PER_SECOND;
     1548
    15481549    // each core activates its private TICK IRQ
    1549     dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
     1550    dev_pic_enable_timer( cycles_per_tick );
    15501551
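For instance, assuming an illustrative sys_clk of 50 MHz and CONFIG_SCHED_TICKS_PER_SECOND = 100 (neither value is given by this changeset), cycles_per_tick = 50,000,000 / 100 = 500,000 cycles, i.e. a 10 ms TICK period.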
    15511552    /////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/kern/ksocket.c

    r669 r683  
    11/*
    2  * ksocket.c - kernel socket API implementation.
     2 * ksocket.c - kernel socket implementation.
    33 *
    4  * Authors  Alain Greiner   (2016,2017,2018,2019,2020)
     4 * Authors  Alain Greiner        (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    117117    switch( sts )
    118118    {
    119         case CMD_STS_SUCCESS  : return "TX_CONNECT";
     119        case CMD_STS_SUCCESS  : return "SUCCESS";
    120120        case CMD_STS_EOF      : return "EOF";
    121121        case CMD_STS_RST      : return "RST";
     
    135135// and request the NIC_TX server thread to re-send the unacknowledged segment.
    136136///////////////////////////////////////////////////////////////////////////////////////////
    137 // @ args_xp    : extended pointer on the involved socket.
     137// @ sock_xp    : extended pointer on the involved socket.
    138138///////////////////////////////////////////////////////////////////////////////////////////
    139 static void __attribute__((noinline)) socket_alarm_handler( xptr_t args_xp )
     139static void __attribute__((noinline)) socket_alarm_handler( xptr_t sock_xp )
    140140{
    141141    // get cluster and local pointer on socket descriptor
    142     socket_t * sock_ptr = GET_PTR( args_xp );
    143     cxy_t      sock_cxy = GET_CXY( args_xp );
     142    socket_t * sock_ptr = GET_PTR( sock_xp );
     143    cxy_t      sock_cxy = GET_CXY( sock_xp );
     144
     145#if DEBUG_SOCKET_ALARM
     146uint32_t cycle = (uint32_t)hal_get_cycles();
     147#endif
     148
     149   // build extended pointer on lock protecting socket
     150    xptr_t socket_lock_xp = XPTR( sock_cxy , &sock_ptr->lock );
     151
     152    // take the socket lock
     153    remote_queuelock_acquire( socket_lock_xp );
    144154
    145155    // get relevant infos from socket descriptor
     
    151161"illegal tx_client field for a retransmission timeout" );
    152162
    153     // get TX client thread cluster and local pointer
    154     thread_t * thread_ptr = GET_PTR( thread_xp );
     163    // get TX client thread cluster
    155164    cxy_t      thread_cxy = GET_CXY( thread_xp );
    156165
     
    168177
    169178    // update the date in alarm
    170     alarm_update( thread_ptr , hal_get_cycles() + TCP_RETRANSMISSION_TIMEOUT );
     179    alarm_update( thread_xp , hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT );
    171180   
    172181    //////////////////////////////
     
    175184
    176185#if DEBUG_SOCKET_ALARM
    177 uint32_t cycle = (uint32_t)hal_get_cycles();
    178 printk("\n[%s] rings for TX_CONNECT : request a new SYN segment / cycle %d\n",
     186if( DEBUG_SOCKET_ALARM < cycle )
     187printk("\n[%s] rings for CONNECT : request a new SYN segment / cycle %d\n",
    179188__FUNCTION__ , cycle );
    180189#endif
     
    193202
    194203#if DEBUG_SOCKET_ALARM
    195 uint32_t cycle = (uint32_t)hal_get_cycles();
    196 printk("\n[%s] rings for TX_ACCEPT : request a new SYN-ACK segment / cycle %d\n",
     204if( DEBUG_SOCKET_ALARM < cycle )
     205printk("\n[%s] rings for ACCEPT : request a new SYN-ACK segment / cycle %d\n",
    197206__FUNCTION__ , cycle );
    198207#endif
     
    211220
    212221#if DEBUG_SOCKET_ALARM
    213 uint32_t cycle = (uint32_t)hal_get_cycles();
    214 printk("\n[%s] rings for TX_CLOSE : request a new FIN-ACK segment / cycle %d\n",
     222if( DEBUG_SOCKET_ALARM < cycle )
     223printk("\n[%s] rings for CLOSE : request a new FIN-ACK segment / cycle %d\n",
    215224__FUNCTION__ , cycle );
    216225#endif
     
    227236    if( tx_cmd == CMD_TX_SEND )
    228237    {
    229         // TODO build a new TX_SEND command
    230     }
     238        // get relevant infos from the socket descriptor
     239        uint32_t  tx_una = hal_remote_l32( XPTR( sock_cxy , &sock_ptr->tx_una ));
     240        uint32_t  tx_ack = hal_remote_l32( XPTR( sock_cxy , &sock_ptr->tx_ack ));
     241        uint32_t  tx_len = hal_remote_l32( XPTR( sock_cxy , &sock_ptr->tx_len ));
     242
     243#if DEBUG_SOCKET_ALARM       
     244if( DEBUG_SOCKET_ALARM < cycle )
     245printk("\n[%s] rings for SEND : request %d bytes / cycle %d\n",
     246__FUNCTION__ , tx_len , cycle );
     247#endif
     248        // update command fields in socket
     249        hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_nxt    ) , tx_una );
     250        hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_todo   ) , tx_len - tx_ack );
     251        hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_valid  ) , true );
     252
     253        // unblock the NIC_TX server thread
     254        thread_unblock( tx_server_xp , THREAD_BLOCKED_CLIENT );
     255    }
     256
     257    // release the socket lock
     258    remote_queuelock_release( socket_lock_xp );
     259
    231260}   // end socket_alarm_handler()
    232 
    233 ///////////////////////////////////////////////////////////////////////////////////////////
    234 // This static function activates the alarm embedded in the calling thread descriptor,
    235 // using the <date> argument.
    236 ///////////////////////////////////////////////////////////////////////////////////////////
    237 // @ delay   : number of cycles (from the current absolute date).
    238 ///////////////////////////////////////////////////////////////////////////////////////////
    239 static void socket_alarm_start( xptr_t   socket_xp,
    240                                 uint32_t delay )
    241 {
    242     // compute absolute date
    243     cycle_t date = hal_get_cycles() + delay;
    244 
    245     // get pointer on calling threadf
    246     thread_t * this = CURRENT_THREAD;
    247 
    248     // start the alarm
    249     alarm_start( date,
    250                  &socket_alarm_handler,   // func_ptr
    251                  socket_xp,               // args_xp
    252                  this );
    253 }
    254 
    255 ///////////////////////////////////////////////////////////////////////////////////////////
    256 // This static function activates the alarm embedded in the calling thread descriptor,
    257 // using the <date> argument.
    258 ///////////////////////////////////////////////////////////////////////////////////////////
    259 // @ date   : absolute date for this alarm.
    260 ///////////////////////////////////////////////////////////////////////////////////////////
    261 static void socket_alarm_stop( void )
    262 {
    263     // get pointer on calling threadf
    264     thread_t * this = CURRENT_THREAD;
    265 
    266     // stop the alarm
    267     alarm_stop( this );
    268 }
    269261
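With the socket_alarm_start() / socket_alarm_stop() helpers removed, the TX client functions are expected to arm the retransmission alarm directly through the generic alarm API. A minimal sketch (illustration only: the socket_xp variable and the calling context are assumptions; alarm_start(), socket_alarm_handler() and CONFIG_SOCK_RETRY_TIMEOUT come from this changeset):

    // executed by the TX client thread, <socket_xp> being the extended pointer on the socket
    thread_t * this = CURRENT_THREAD;

    alarm_start( XPTR( local_cxy , this ),                       // client thread
                 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,   // absolute date
                 &socket_alarm_handler,                          // retransmission handler
                 socket_xp );                                    // handler argument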
    270262/////////////////////////////////////////////////////////////////////////////////////////
     
    470462// associated to a socket: file descriptor, socket descriptor, RX buffer, R2T queue,
    471463// and CRQ queue. It allocates an fdid, and registers it in the process fd_array.
    472 // It initialise the  the socket desccriptor static fields, other than local_addr,
     464// It initialises the socket descriptor static fields, other than (local_addr,
    473465// local_port, remote_addr, remote_port), and sets the socket state to UNBOUND.
    474466// It returns the local pointer on socket descriptor and the fdid value in buffers
     
    489481{
    490482    uint32_t       fdid;
    491     kmem_req_t     req;
    492483    socket_t     * socket;
    493484    vfs_file_t   * file;
    494485    uint32_t       state;
     486    void         * tx_buf;
    495487    error_t        error;
     488
    496489
    497490    thread_t  * this    = CURRENT_THREAD;
    498491    process_t * process = this->process;
    499492
     493#if DEBUG_SOCKET_CREATE || DEBUG_SOCKET_ERROR
     494uint32_t cycle = (uint32_t)hal_get_cycles();
     495#endif
     496
    500497#if DEBUG_SOCKET_CREATE
    501 uint32_t cycle = (uint32_t)hal_get_cycles();
    502498if( DEBUG_SOCKET_CREATE < cycle )
    503499printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     
    506502   
    507503    // 1. allocate memory for socket descriptor
    508     req.type   = KMEM_KCM;
    509     req.order  = bits_log2( sizeof(socket_t) );
    510     req.flags  = AF_ZERO;
    511     socket     = kmem_remote_alloc( cxy , &req );
     504    socket = kmem_remote_alloc( cxy , bits_log2(sizeof(socket_t)) , AF_ZERO );
    512505
    513506    if( socket == NULL )
    514507    {
    515         printk("\n[ERROR] in %s : cannot allocate socket descriptor / thread[%x,%x]\n",
    516         __FUNCTION__, process->pid, this->trdid );
    517         return -1;
    518     }
    519 
    520     // 2. allocate memory for rx_buf buffer
     508
     509#if DEBUG_SOCKET_ERROR
     510printk("\n[ERROR] in %s : cannot allocate socket descriptor / thread[%x,%x] / cycle %d\n",
     511__FUNCTION__, process->pid, this->trdid, cycle );
     512#endif
     513        return -1;
     514    }
     515
     516    // 2. allocate memory for rx_buf data buffer
    521517    error = remote_buf_init( XPTR( cxy , &socket->rx_buf ),
    522                              bits_log2( CONFIG_SOCK_RX_BUF_SIZE ) );
     518                             CONFIG_SOCK_RX_BUF_ORDER );
    523519
    524520    if( error )
    525521    {
    526         printk("\n[ERROR] in %s : cannot allocate rx_buf / thread[%x,%x]\n",
    527         __FUNCTION__, process->pid, this->trdid );
    528         req.type = KMEM_KCM;
    529         req.ptr  = socket;
    530         kmem_remote_free( cxy , &req );
    531         return -1;
    532     }
    533 
    534     // 3. allocate memory for r2tq queue
     522
     523#if DEBUG_SOCKET_ERROR
     524printk("\n[ERROR] in %s : no memory for rx_buf / thread[%x,%x] / cycle %d\n",
     525__FUNCTION__, process->pid, this->trdid, cycle );
     526#endif
     527        kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) );  // 1
     528        return -1;
     529    }
     530
     531    // 3. allocate memory for tx_buf
     532    tx_buf = kmem_remote_alloc( cxy , CONFIG_SOCK_TX_BUF_ORDER , AF_NONE );
     533
     534    if( tx_buf == NULL )
     535    {
     536
     537#if DEBUG_SOCKET_ERROR
     538printk("\n[ERROR] in %s : no memory for tx_buf / thread[%x,%x] / cycle %d\n",
     539__FUNCTION__, process->pid, this->trdid, cycle );
     540#endif
     541        remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );        // 2
     542        kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) );  // 1
     543        return -1;
     544    }
     545
     546    // 4. allocate memory for r2tq queue
    535547    error = remote_buf_init( XPTR( cxy , &socket->r2tq ),
    536548                             bits_log2( CONFIG_SOCK_R2T_BUF_SIZE ) );
    537549    if( error )
    538550    {
    539         printk("\n[ERROR] in %s : cannot allocate R2T queue / thread[%x,%x]\n",
    540         __FUNCTION__, process->pid, this->trdid );
    541         remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );
    542         req.type = KMEM_KCM;
    543         req.ptr  = socket;
    544         kmem_remote_free( cxy , &req );
    545         return -1;
    546     }
    547 
    548     // don't allocate memory for crqq queue, as it is done by the socket_listen function
    549 
    550     //  4. allocate memory for file descriptor
    551         req.type  = KMEM_KCM;
    552         req.order = bits_log2( sizeof(vfs_file_t) );
    553     req.flags = AF_ZERO;
    554         file      = kmem_remote_alloc( cxy , &req );
     551
     552#if DEBUG_SOCKET_ERROR
     553printk("\n[ERROR] in %s : cannot allocate R2T queue / thread[%x,%x] / cycle %d\n",
     554__FUNCTION__, process->pid, this->trdid, cycle );
     555#endif
     556        kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER );     // 3
     557        remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );        // 2
     558        kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) );  // 1
     559        return -1;
     560    }
     561
     562    // don't allocate memory for CRQ queue / done by the socket_listen function
     563
     564    // 5. allocate memory for file descriptor
     565        file = kmem_remote_alloc( cxy , bits_log2(sizeof(vfs_file_t)) , AF_ZERO );
    555566
    556567    if( file == NULL )
    557568    {
    558         printk("\n[ERROR] in %s : cannot allocate file descriptor / thread[%x,%x]\n",
    559         __FUNCTION__, process->pid, this->trdid );
    560         remote_buf_release_data( XPTR( cxy , &socket->r2tq ) );
    561         remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );
    562         req.type = KMEM_KCM;
    563         req.ptr  = socket;
    564         kmem_remote_free( cxy , &req );
     569
     570#if DEBUG_SOCKET_ERROR
     571printk("\n[ERROR] in %s : cannot allocate file descriptor / thread[%x,%x] / cycle %d\n",
     572__FUNCTION__, process->pid, this->trdid, cycle );
     573#endif
     574        remote_buf_release_data( XPTR( cxy , &socket->r2tq ) );          // 4
     575        kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER );     // 3
     576        remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );        // 2
     577        kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) );  // 1
    565578        return -1;
    566579    }
    567580   
    568     // 5. get an fdid value, and register file descriptor in fd_array[]
     581    // 6. get an fdid value, and register file descriptor in fd_array[]
    569582    error = process_fd_register( process->ref_xp,
    570583                                 XPTR( cxy , file ),
     
    572585    if ( error )
    573586    {
    574         printk("\n[ERROR] in %s : cannot register file descriptor / thread[%x,%x]\n",
    575         __FUNCTION__, process->pid, this->trdid );
    576         req.type = KMEM_KCM;
    577         req.ptr  = file;
    578         kmem_free( &req );
    579         remote_buf_release_data( XPTR( cxy , &socket->r2tq ) );
    580         remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );
    581         req.ptr  = socket;
    582         kmem_free( &req );
     587
     588#if DEBUG_SOCKET_ERROR
     589if( DEBUG_SOCKET_ERROR < cycle )
     590printk("\n[ERROR] in %s : cannot register file descriptor / thread[%x,%x] / cycle %d\n",
     591__FUNCTION__, process->pid, this->trdid, cycle );
     592#endif
     593        kmem_remote_free( cxy , file , bits_log2(sizeof(vfs_file_t)) );  // 5
     594        remote_buf_release_data( XPTR( cxy , &socket->r2tq ) );          // 4
     595        kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER );     // 3
     596        remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) );        // 2
     597        kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) );  // 1
    583598        return -1;
    584599    }
     
    597612    hal_remote_s32( XPTR( cxy , &socket->rx_valid    ) , false );
    598613    hal_remote_s32( XPTR( cxy , &socket->nic_channel ) , 0 );
     614    hal_remote_spt( XPTR( cxy , &socket->tx_buf      ) , tx_buf );
    599615
    600616    // initialize file descriptor
     
    606622
    607623#if DEBUG_SOCKET_CREATE
     624cycle = (uint32_t)hal_get_cycles();
    608625if( DEBUG_SOCKET_CREATE < cycle )
    609626printk("\n[%s] thread[%x,%x] exit / socket[%x,%d] / xptr[%x,%x] / cycle %d\n",
     
    631648static void socket_destroy( xptr_t file_xp )
    632649{
    633     kmem_req_t          req;
    634 
    635650    thread_t  * this    = CURRENT_THREAD;
    636651    process_t * process = this->process;
     
    677692
    678693    // release memory allocated for file descriptor
    679     req.type = KMEM_KCM;
    680     req.ptr  = file_ptr;
    681     kmem_remote_free( file_cxy , &req );
     694    kmem_remote_free( file_cxy , file_ptr , bits_log2(sizeof(vfs_file_t)) );
    682695
    683696    // release memory allocated for buffers attached to socket descriptor
     
    687700
    688701    // release memory allocated for socket descriptor
    689     req.type = KMEM_KCM;
    690     req.ptr  = socket_ptr;
    691     kmem_remote_free( file_cxy , &req );
     702    kmem_remote_free( file_cxy , socket_ptr , bits_log2(sizeof(socket_t)) );
    692703
    693704#if DEBUG_SOCKET_DESTROY
     
    702713////////////////////////////////////////////////
    703714void socket_put_r2t_request( xptr_t    queue_xp,
    704                              uint32_t  flags,
     715                             uint8_t   flags,
    705716                             uint32_t  channel )
    706717{
     
    715726        // try to register R2T request
    716727        error_t error = remote_buf_put_from_kernel( queue_xp,
    717                                                     (uint8_t *)(&flags),
     728                                                    &flags,
    718729                                                    1 );
    719730        if( error )
     
    740751    }
    741752}  // end socket_put_r2t_request()
     753
     754///////////////////////////////////////////////////
     755error_t socket_get_r2t_request( xptr_t    queue_xp,
     756                                uint8_t * flags )
     757{
     758    // get one request from R2T queue
     759    return remote_buf_get_to_kernel( queue_xp,
     760                                     flags,
     761                                     1 );
     762}  // end socket_get_r2t_request()
    742763 
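The two functions above form the producer / consumer interface of the R2T (RX to TX) queue. A minimal usage sketch (illustration only: r2tq_xp, channel and TCP_FLAG_ACK are assumptions not defined in this changeset, and a zero return code is assumed to mean success):

    // producer side (e.g. NIC_RX server): request an ACK segment on <channel>
    socket_put_r2t_request( r2tq_xp , TCP_FLAG_ACK , channel );

    // consumer side (e.g. NIC_TX server): extract one request, if any
    uint8_t flags;
    if( socket_get_r2t_request( r2tq_xp , &flags ) == 0 )
    {
        // build and send a segment carrying <flags>
    }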
    743764///////////////////////////////////////////////////
     
    843864    process_t * process = this->process;
    844865
     866#if DEBUG_SOCKET_BIND || DEBUG_SOCKET_ERROR
     867uint32_t cycle = (uint32_t)hal_get_cycles();
     868#endif
     869
    845870#if DEBUG_SOCKET_BIND
    846 uint32_t cycle = (uint32_t)hal_get_cycles();
    847871if( DEBUG_SOCKET_BIND < cycle )
    848872printk("\n[%s] thread[%x,%x] enter / socket[%x,%d] / addr %x / port %x / cycle %d\n",
     
    858882    if( file_xp == XPTR_NULL )
    859883    {
    860         printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x]\n",
    861         __FUNCTION__, fdid, process->pid, this->trdid );
     884
     885#if DEBUG_SOCKET_ERROR
     886printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n",
     887__FUNCTION__, fdid, process->pid, this->trdid, cycle );
     888#endif
    862889        return -1;
    863890    }
     
    869896    if( file_type != FILE_TYPE_SOCK )
    870897    {
    871         printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]",
    872         __FUNCTION__, vfs_inode_type_str( file_type ), process->pid, this->trdid );
     898
     899#if DEBUG_SOCKET_ERROR
     900printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d",
     901__FUNCTION__, vfs_inode_type_str( file_type ), process->pid, this->trdid, cycle );
     902#endif
    873903        return -1;
    874904    }
     
    918948    process_t * process = this->process;
    919949
     950#if DEBUG_SOCKET_LISTEN || DEBUG_SOCKET_ERROR
     951uint32_t cycle = (uint32_t)hal_get_cycles();
     952#endif
     953
    920954#if DEBUG_SOCKET_LISTEN
    921 uint32_t cycle = (uint32_t)hal_get_cycles();
    922955if( DEBUG_SOCKET_LISTEN < cycle )
    923956printk("\n[%s] thread[%x,%x] enter / socket[%x,%d] / crq_depth %x / cycle %d\n",
     
    933966    if( file_xp == XPTR_NULL )
    934967    {
    935         printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x]\n",
    936         __FUNCTION__, fdid, process->pid, this->trdid );
     968
     969#if DEBUG_SOCKET_ERROR
     970printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n",
     971__FUNCTION__, fdid, process->pid, this->trdid, cycle );
     972#endif
    937973        return -1;
    938974    }
     
    944980    if( file_type != FILE_TYPE_SOCK )
    945981    {
    946         printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]\n",
    947         __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid );
     982
     983#if DEBUG_SOCKET_ERROR
     984printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d\n",
     985__FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid, cycle );
     986#endif
    948987        return -1;
    949988    }
     
    958997    if( socket_type != SOCK_STREAM )
    959998    {
    960         printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x]\n",
    961         __FUNCTION__, socket_type_str(socket_type), process->pid, this->trdid );
     999
     1000#if DEBUG_SOCKET_ERROR
     1001printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x] / cycle %d\n",
     1002__FUNCTION__, socket_type_str(socket_type), process->pid, this->trdid, cycle );
     1003#endif
    9621004        return -1;
    9631005    }
     
    9661008    if( socket_state != TCP_STATE_BOUND )
    9671009    {
    968         printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x]\n",
    969         __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid );
     1010
     1011#if DEBUG_SOCKET_ERROR
     1012printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x] / cycle %d\n",
     1013__FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid, cycle );
     1014#endif
    9701015        return -1;
    9711016    }
     
    9801025    if( error )
    9811026    {
    982         printk("\n[ERROR] in %s : cannot allocate CRQ queue / thread[%x,%x]\n",
    983         __FUNCTION__, process->pid, this->trdid );
     1027
     1028#if DEBUG_SOCKET_ERROR
     1029printk("\n[ERROR] in %s : cannot allocate CRQ queue / thread[%x,%x] / cycle %d\n",
     1030__FUNCTION__, process->pid, this->trdid, cycle );
     1031#endif
    9841032        return -1;
    9851033    }
     
    10111059    vfs_file_t        * file_ptr;
    10121060    cxy_t               file_cxy;
    1013     vfs_file_type_t    file_type;           // file descriptor type
     1061    vfs_file_type_t     file_type;           // file descriptor type
    10141062    socket_t          * socket_ptr;          // local pointer on remote waiting socket
    10151063    uint32_t            socket_type;         // listening socket type   
     
    10451093    process_t * process   = this->process;
    10461094
    1047 #if DEBUG_SOCKET_ACCEPT
     1095#if DEBUG_SOCKET_ACCEPT || DEBUG_SOCKET_ERROR
    10481096uint32_t cycle = (uint32_t)hal_get_cycles();
     1097#endif
     1098
     1099#if DEBUG_SOCKET_ACCEPT
    10491100if( DEBUG_SOCKET_ACCEPT < cycle )
    10501101printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / cycle %d\n",
     
    10601111    if( file_xp == XPTR_NULL )
    10611112    {
    1062         printk("\n[ERROR] in %s : undefined fdid %d",
    1063         __FUNCTION__, fdid );
     1113
     1114#if DEBUG_SOCKET_ERROR
     1115printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d",
     1116__FUNCTION__, fdid, process->pid, this->trdid, cycle );
     1117#endif
    10641118        return -1;
    10651119    }
     
    10711125    if( file_type != FILE_TYPE_SOCK )
    10721126    {
    1073         printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]\n",
    1074         __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid );
     1127
     1128#if DEBUG_SOCKET_ERROR
     1129printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d\n",
     1130__FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid, cycle );
     1131#endif
    10751132        return -1;
    10761133    }
     
    10971154    if( socket_type != SOCK_STREAM )
    10981155    {
    1099         // release listening socket lock
     1156                   
     1157#if DEBUG_SOCKET_ERROR
     1158printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x] / cycle %d\n",
     1159__FUNCTION__, socket_type_str(socket_type), process->pid , this->trdid, cycle );
     1160#endif
    11001161        remote_queuelock_release( socket_lock_xp );
    1101                    
    1102         printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x]\n",
    1103         __FUNCTION__, socket_type_str(socket_type), process->pid , this->trdid );
    11041162        return -1;
    11051163    }
     
    11081166    if( socket_state != TCP_STATE_LISTEN )
    11091167    {
    1110         // release listening socket lock
     1168
     1169#if DEBUG_SOCKET_ERROR
     1170printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x] / cycle %d\n",
     1171__FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid, cycle );
     1172#endif
    11111173        remote_queuelock_release( socket_lock_xp );
    1112                    
    1113         printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x]\n",
    1114         __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid );
    11151174        return -1;
    11161175    }
     
    11191178    if( (socket_rx_valid == true) || (socket_rx_client != XPTR_NULL) )
    11201179    {
    1121         // release listening socket lock
     1180
     1181#if DEBUG_SOCKET_ERROR
     1182printk("\n[ERROR] in %s : previous RX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1183__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     1184#endif
    11221185        remote_queuelock_release( socket_lock_xp );
    1123                    
    1124         printk("\n[ERROR] in %s : previous RX cmd on socket[%x,%d] / thread[%x,%x]\n",
    1125         __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    11261186        return -1;
    11271187    }
     
    11301190    if( (socket_tx_valid == true) || (socket_tx_client != XPTR_NULL) )
    11311191    {
    1132         // release socket lock
     1192                   
     1193#if DEBUG_SOCKET_ERROR
     1194printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1195__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     1196#endif
    11331197        remote_queuelock_release( socket_lock_xp );
    1134                    
    1135         printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x]\n",
    1136         __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    1137         return -1;
    1138     }
    1139 
    1140     // 2) build extended pointer on listening socket.crq
     1198        return -1;
     1199    }
     1200
      1201    // 2) check the listening socket CRQ
    11411202    crq_xp  = XPTR( file_cxy , &socket_ptr->crqq );
    11421203
     
    11441205    crq_status = remote_buf_status( crq_xp );
    11451206
    1146     // block & deschedule when CRQ empty
      1207    // block & deschedule to wait for a client request when the CRQ is empty
    11471208    if( crq_status == 0 )
    11481209    {
    1149         // register command arguments in listening socket
     1210        // register command arguments for NIC_RX server in listening socket
    11501211        hal_remote_s32( XPTR( file_cxy , &socket_ptr->rx_cmd    ), CMD_RX_ACCEPT );
    11511212        hal_remote_s64( XPTR( file_cxy , &socket_ptr->rx_client ), client_xp );
     
    11791240        crq_status   = remote_buf_status( crq_xp );
    11801241
    1181 assert( __FUNCTION__, (((crq_status > 0) || (cmd_status!= CMD_STS_SUCCESS)) && (cmd_valid == false)),
     1242assert( __FUNCTION__,
     1243(((crq_status > 0) || (cmd_status!= CMD_STS_SUCCESS)) && (cmd_valid == false)),
    11821244"illegal socket state when client thread resumes after RX_ACCEPT" );
    11831245
     
    11871249        if( cmd_status != CMD_STS_SUCCESS )
    11881250        {
    1189             // release socket lock
     1251
     1252#if DEBUG_SOCKET_ERROR
      1253printk("\n[ERROR] in %s : error reported for RX_ACCEPT / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1254__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     1255#endif
    11901256            remote_queuelock_release( socket_lock_xp );
    1191 
    1192             printk("\n[ERROR] in %s for RX_ACCEPT command / socket[%x,%d] / thread[%x,%x]\n",
    1193             __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    11941257            return -1;
    11951258        }
    1196 
    1197         // extract first request from the listening socket CRQ
    1198         error = socket_get_crq_request( crq_xp,
     1259    }  // end blocking on CRQ empty
     1260
     1261    // from this point, we can extract a request from listening socket CRQ
     1262    error = socket_get_crq_request( crq_xp,
    11991263                                    &new_remote_addr,
    12001264                                    &new_remote_port,
    12011265                                    &new_remote_iss,
    12021266                                    &new_remote_window );
    1203 
    12041267assert( __FUNCTION__, (error == 0),
    12051268"cannot get a connection request from a non-empty CRQ" );
    12061269
    1207         // reset listening socket rx_client
    1208         hal_remote_s32( XPTR( file_cxy , &socket_ptr->rx_client ) , XPTR_NULL );
    1209 
    1210         // release socket lock
    1211         remote_queuelock_release( socket_lock_xp );
    1212 
    1213     }  // end blocking on CRQ status
    1214 
    1215     // from this point, we can create a new socket
    1216     // and ask the NIC_TX to send a SYN-ACK segment
     1270    // release listening socket lock
     1271    remote_queuelock_release( socket_lock_xp );
    12171272
    12181273#if DEBUG_SOCKET_ACCEPT
    12191274cycle = (uint32_t)hal_get_cycles();
    12201275if( DEBUG_SOCKET_ACCEPT < cycle )
    1221 printk("\n[%s] thread[%x,%x] socket[%x,%d] / got a CRQ request / cycle %d\n",
    1222 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
     1276printk("\n[%s] thread[%x,%x] socket[%x,%d] / CRQ request [addr %x / port %x] / cycle %d\n",
     1277__FUNCTION__, process->pid, this->trdid, process->pid, fdid,
     1278new_remote_addr, new_remote_port, cycle );
    12231279#endif
    12241280
     
    12341290    if( error )
    12351291    {
    1236         printk("\n[ERROR] in %s : cannot allocate new socket / thread[%x,%x]\n",
    1237         __FUNCTION__, process->pid, this->trdid );
     1292
     1293#if DEBUG_SOCKET_ERROR
     1294printk("\n[ERROR] in %s : cannot create new socket / thread[%x,%x] / cycle %d\n",
     1295__FUNCTION__, process->pid, this->trdid, cycle );
     1296#endif
    12381297        return -1;
    12391298    }
     
    12871346 
    12881347    // start retransmission timer
    1289     socket_alarm_start( new_socket_xp , TCP_RETRANSMISSION_TIMEOUT );
     1348    alarm_start( client_xp,
     1349                 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,
     1350                 &socket_alarm_handler,
     1351                 new_socket_xp );
    12901352
    12911353#if DEBUG_SOCKET_ACCEPT
    12921354cycle = (uint32_t)hal_get_cycles();
    12931355if( DEBUG_SOCKET_ACCEPT < cycle )
    1294 printk("\n[%s] thread[%x,%x] new_socket[%x,%d] blocks on <IO> waiting ESTAB / cycle %d\n",
      1356printk("\n[%s] thread[%x,%x] for socket[%x,%d] requests SYN-ACK & blocks on <IO> / cycle %d\n",
    12951357__FUNCTION__, process->pid, this->trdid, process->pid, new_fdid, cycle );
    12961358#endif
     
    13071369#endif
    13081370
    1309     // stop retransmission timer
    1310     socket_alarm_stop();
     1371    // stop retransmission timer in thread descriptor
     1372    alarm_stop( client_xp );
    13111373
    13121374    // get new socket state, tx_valid and tx_sts
     
    13151377    cmd_status = hal_remote_l32( XPTR( new_socket_cxy , &new_socket_ptr->tx_sts ));
    13161378
    1317 assert( __FUNCTION__, (((new_state == TCP_STATE_ESTAB) || (cmd_status != CMD_STS_SUCCESS))
    1318         && (cmd_valid == false)),
     1379assert( __FUNCTION__,
     1380(((new_state == TCP_STATE_ESTAB) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)),
    13191381"illegal socket state when client thread resumes after TX_ACCEPT" );
    13201382
     
    13241386    if( cmd_status != CMD_STS_SUCCESS )
    13251387    {
    1326         printk("\n[ERROR] in %s for TX_ACCEPT command / socket[%x,%d] / thread[%x,%x]\n",
    1327         __FUNCTION__, process->pid, new_fdid, process->pid, this->trdid );
     1388
     1389#if DEBUG_SOCKET_ERROR
      1390printk("\n[ERROR] in %s : error reported for TX_ACCEPT / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1391__FUNCTION__, process->pid, new_fdid, process->pid, this->trdid, cycle );
     1392#endif
    13281393        return -1;
    13291394    }
     
    13701435    trdid_t     trdid     = this->trdid;
    13711436
     1437#if DEBUG_SOCKET_CONNECT || DEBUG_SOCKET_ERROR
     1438uint32_t cycle = (uint32_t)hal_get_cycles();
     1439#endif
     1440
    13721441    // get pointers on file descriptor
    13731442    xptr_t       file_xp  = process_fd_get_xptr_from_local( this->process , fdid );
     
    13781447    if( file_xp == XPTR_NULL )
    13791448    {
    1380         printk("\n[ERROR] in %s : undefined fdid %d",
    1381         __FUNCTION__, fdid );
     1449
     1450#if DEBUG_SOCKET_ERROR
      1451printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n",
     1452__FUNCTION__, fdid, pid, trdid, cycle );
     1453#endif
    13821454        return -1;
    13831455    }
     
    13881460
    13891461#if DEBUG_SOCKET_CONNECT
    1390 uint32_t cycle = (uint32_t)hal_get_cycles();
    13911462if( DEBUG_SOCKET_CONNECT < cycle )
    1392 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / addr %x / port %d / cycle %d\n",
     1463printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / addr %x / port %x / cycle %d\n",
    13931464__FUNCTION__,  pid, trdid, pid, fdid, remote_addr, remote_port, cycle );
    13941465#endif
     
    13971468    if( file_type != FILE_TYPE_SOCK )
    13981469    {
    1399         printk("\n[ERROR] in %s : illegal file type %s",
    1400         __FUNCTION__, vfs_inode_type_str( file_type ) );
     1470
     1471#if DEBUG_SOCKET_ERROR
      1472printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d\n",
     1473__FUNCTION__, vfs_inode_type_str( file_type ), pid, trdid, cycle );
     1474#endif
    14011475        return -1;
    14021476    }
     
    14121486        if( socket_state != UDP_STATE_BOUND )
    14131487        {
    1414             printk("\n[ERROR] in %s : illegal socket state %s for type %s",
    1415             __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type) );
     1488
     1489#if DEBUG_SOCKET_ERROR
      1490printk("\n[ERROR] in %s : illegal socket state %s for type %s / thread[%x,%x] / cycle %d\n",
     1491__FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type), pid, trdid, cycle );
     1492#endif
    14161493            return -1;
    14171494        }
     
    14211498        if( socket_state != TCP_STATE_BOUND )
    14221499        {
    1423             printk("\n[ERROR] in %s : illegal socket state %s for type %s",
    1424             __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type) );
     1500
     1501#if DEBUG_SOCKET_ERROR
      1502printk("\n[ERROR] in %s : illegal socket state %s for type %s / thread[%x,%x] / cycle %d\n",
     1503__FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type), pid, trdid, cycle );
     1504#endif
    14251505            return -1;
    14261506        }
     
    14281508    else
    14291509    {
    1430         printk("\n[ERROR] in %s : illegal socket type %s",
    1431         __FUNCTION__,  socket_type_str(socket_type) );
     1510
     1511#if DEBUG_SOCKET_ERROR
      1512printk("\n[ERROR] in %s : illegal socket type / thread[%x,%x] / cycle %d\n",
     1513__FUNCTION__, pid, trdid, cycle );
     1514#endif
    14321515        return -1;
    14331516    }
     
    14751558 
    14761559        // start retransmission timer
    1477         socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT );
     1560        alarm_start( client_xp,
     1561                     hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,
     1562                     &socket_alarm_handler,
     1563                     socket_xp );
    14781564
    14791565#if DEBUG_SOCKET_CONNECT
     
    14941580#endif
    14951581
    1496         // stop retransmission timer
    1497         socket_alarm_stop();
     1582        // stop retransmission timer in thread descriptor
     1583        alarm_stop( client_xp );
    14981584
    14991585        // get socket state, tx_valid and tx_sts
     
    15071593
    15081594        // reset socket.tx_client
    1509         hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );
     1595        hal_remote_s64( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );
    15101596
    15111597        if( cmd_status != CMD_STS_SUCCESS )
    15121598        {
    1513             printk("\n[ERROR] in %s : for command TX_CONNECT / socket[%x,%d] / thread[%x,%x]\n",
    1514             __FUNCTION__, pid, fdid, pid, trdid );
     1599
     1600#if DEBUG_SOCKET_ERROR
      1601printk("\n[ERROR] in %s : error reported for TX_CONNECT / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1602__FUNCTION__, pid, fdid, pid, trdid, cycle );
     1603#endif
    15151604            return -1;
    15161605        }
     
    15481637    trdid_t      trdid     = this->trdid;
    15491638
     1639#if DEBUG_SOCKET_CLOSE || DEBUG_SOCKET_ERROR
     1640uint32_t cycle = (uint32_t)hal_get_cycles();
     1641#endif
     1642
    15501643    // get pointers on socket descriptor
    15511644    cxy_t        file_cxy   = GET_CXY( file_xp );
     
    15581651
    15591652#if DEBUG_SOCKET_CLOSE
    1560 uint32_t cycle = (uint32_t)hal_get_cycles();
    15611653if (DEBUG_SOCKET_CLOSE < cycle )
    15621654printk("\n[%s] thread[%x,%x] enters for socket[%x,%d] / cycle %d\n",
     
    15741666        (hal_remote_l64( XPTR( file_cxy , &socket_ptr->tx_client)) != XPTR_NULL) )
    15751667    {
    1576         // release socket lock
     1668                   
     1669#if DEBUG_SOCKET_ERROR
     1670printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1671__FUNCTION__, pid, fdid, pid, trdid, cycle );
     1672#endif
    15771673        remote_queuelock_release( socket_lock_xp );
    1578                    
    1579         printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x]\n",
    1580         __FUNCTION__, pid, fdid, pid, trdid );
    15811674        return -1;
    15821675    }
     
    16451738
    16461739        // start retransmission timer
    1647         socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT );
     1740        alarm_start( client_xp,
     1741                     hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,
     1742                     &socket_alarm_handler,
     1743                     socket_xp );
    16481744
    16491745#if DEBUG_SOCKET_CLOSE
     
    16631759__FUNCTION__, pid, trdid, pid, fdid, cycle );
    16641760#endif
    1665         // stop retransmission timer
    1666         socket_alarm_stop();
     1761        // stop retransmission timer in thread descriptor
     1762        alarm_stop( client_xp );
    16671763
    16681764        // take socket lock
     
    16741770        cmd_valid    = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid ) );
    16751771
    1676 assert( __FUNCTION__, (((socket_state == TCP_STATE_CLOSED) || (cmd_status != CMD_STS_SUCCESS))
    1677          && (cmd_valid == false)),
    1678 "illegal socket state when client thread resumes after TX_CLOSE\n"
    1679 " socket_state = %s / cmd_status = %d / cmd_valid = %d\n",
     1772assert( __FUNCTION__,
     1773(((socket_state == TCP_STATE_CLOSED) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)),
      1774"illegal socket state after TX_CLOSE : socket_state = %s / cmd_status = %d / cmd_valid = %d",
    16801775socket_state_str(socket_state), cmd_status, cmd_valid );
    16811776
    16821777        // reset socket.tx_client
    1683         hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );
     1778        hal_remote_s64( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );
    16841779
    16851780        if( cmd_status != CMD_STS_SUCCESS )  // error reported
    16861781        {
    1687             printk("\n[ERROR] in %s for command TX_CLOSE / socket[%x,%d] / thread[%x,%x]\n",
    1688             __FUNCTION__, pid, fdid, pid, this->trdid );
     1782
     1783#if DEBUG_SOCKET_ERROR
     1784printk("\n[ERROR] in %s for command TX_CLOSE / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1785__FUNCTION__, pid, fdid, pid, this->trdid, cycle );
     1786#endif
    16891787            return -1;
    16901788        }
     
    17081806////////////////////////////////////////////////////////////////////////////////////////
    17091807// This static function is called by the two functions socket_send() & socket_recv().
    1710 // It can be used for both UDP and TCP sockets.
     1808// It is used for both UDP and TCP sockets.
    17111809////////////////////////////////////////////////////////////////////////////////////////
    17121810// @ is_send   : send when true / receive when false.
    17131811// @ fdid      : socket identifier.
    17141812// @ u_buf     : pointer on user buffer in user space.
    1715 // @ length    : number of bytes.
     1813// @ length    : number of bytes in buffer.
    17161814////////////////////////////////////////////////////////////////////////////////////////
    17171815// Implementation note : The behavior is different for SEND & RECV
     
    17491847    chdev_t           * chdev_ptr;
    17501848    cxy_t               chdev_cxy;
    1751     uint32_t            buf_status;      // number of bytes in rx_buf
     17521849    int32_t             moved_bytes;     // total number of moved bytes (for return)
    1753     xptr_t              server_xp;       // extended pointer on NIC_TX / NIC_RX server thread
    1754     thread_t          * server_ptr;      // local pointer on NIC_TX / NIC_RX server thread
    1755     kmem_req_t          req;             // KCM request for TX kernel buffer
    1756     uint8_t           * tx_buf;          // kernel buffer for TX transfer
    1757     bool_t              cmd_valid;       // from socket descriptor
    1758     uint32_t            cmd_status;      // from socket descriptor
    1759     uint32_t            tx_todo;         // from socket descriptor
     1850    xptr_t              server_xp;       // ext pointer on NIC_TX / NIC_RX thread
     1851    thread_t          * server_ptr;      // local pointer on NIC_TX / NIC_RX thread
     1852    uint8_t           * tx_buf;          // pointer on kernel buffer for TX transfer
     1853    bool_t              cmd_valid;       // RX or TX command from socket descriptor
     1854    uint32_t            cmd_sts;         // RX or TX command from socket descriptor
     1855    uint32_t            tx_todo;         // number of bytes still to send
     1856    xptr_t              rx_buf_xp;       // extended pointer on socket rx_buf
     1857    uint32_t            rx_buf_sts;      // current status of socket rx_buf
    17601858
    17611859    thread_t  * this    = CURRENT_THREAD;
    17621860    process_t * process = this->process;
     1861
     1862#if DEBUG_SOCKET_SEND || DEBUG_SOCKET_RECV || DEBUG_SOCKET_ERROR
     1863uint32_t cycle = (uint32_t)hal_get_cycles();
     1864#endif
     1865
     1866#if DEBUG_SOCKET_SEND || DEBUG_SOCKET_RECV
     1867if( is_send )
     1868printk("\n[%s] thread[%x,%x] socket[%x,%d] enter : SEND / buf %x / length %d / cycle %d\n",
     1869__FUNCTION__, process->pid, this->trdid, process->pid, fdid, u_buf, length, cycle );
     1870else
     1871printk("\n[%s] thread[%x,%x] socket[%x,%d] enter : RECV / buf %x / length %d / cycle %d\n",
     1872__FUNCTION__, process->pid, this->trdid, process->pid, fdid, u_buf, length, cycle );
     1873#endif
    17631874
    17641875    // build extended pointer on client thread
     
    17721883    if( file_xp == XPTR_NULL )
    17731884    {
    1774         printk("\n[ERROR] in %s : undefined fdid %d / thread%x,%x]\n",
    1775         __FUNCTION__, fdid , process->pid, this->trdid );
     1885
     1886#if DEBUG_SOCKET_ERROR
      1887printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n",
     1888__FUNCTION__, fdid , process->pid, this->trdid, cycle );
     1889#endif
    17761890        return -1;
    17771891    }
     
    17871901    if( file_type != FILE_TYPE_SOCK )
    17881902    {
    1789         printk("\n[ERROR] in %s : illegal file type %s / socket[%x,%d]\n",
    1790         __FUNCTION__, vfs_inode_type_str(file_type), process->pid, fdid );
     1903
     1904#if DEBUG_SOCKET_ERROR
      1905printk("\n[ERROR] in %s : illegal file type / thread[%x,%x] / cycle %d\n",
     1906__FUNCTION__, process->pid, this->trdid, cycle );
     1907#endif
    17911908        return -1;
    17921909    }
     
    18031920    nic_channel  = hal_remote_l32( XPTR( file_cxy , &socket_ptr->nic_channel ));
    18041921
    1805     /////////////
     1922    //////////////////////////////////////////////////////
    18061923    if( is_send )                       // SEND command
    18071924    {
    18081925
    18091926#if DEBUG_SOCKET_SEND
    1810 uint32_t    cycle = (uint32_t)hal_get_cycles();
     1927cycle = (uint32_t)hal_get_cycles();
    18111928if (DEBUG_SOCKET_SEND < cycle )
    1812 printk("\n[%s] thread[%x,%x] received SEND command for socket[%x,%d] / length %d / cycle %d\n",
     1929printk("\n[%s] thread[%x,%x] / socket[%x,%d] get SEND / length %d / cycle %d\n",
    18131930__FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle );
    18141931#endif
     1932
    18151933        // check no previous TX command
    1816         if( (hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid )) == true) ||
    1817             (hal_remote_l64( XPTR( file_cxy , &socket_ptr->tx_client)) != XPTR_NULL) )
     1934        if( hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid )) == true )
    18181935        {
    1819             // release socket lock
     1936                   
     1937#if DEBUG_SOCKET_ERROR
     1938printk("\n[ERROR] in %s : previous TX command / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     1939__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     1940#endif
    18201941            remote_queuelock_release( socket_lock_xp );
    1821                    
    1822             printk("\n[ERROR] in %s : previous TX command / socket[%x,%d] / thread[%x,%x]\n",
    1823             __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    18241942            return -1;
    18251943        }
    18261944
    1827         // allocate a temporary kernel buffer
    1828         req.type  = KMEM_KCM;
    1829         req.order = bits_log2( length );
    1830         req.flags = AF_NONE;
    1831         tx_buf    = kmem_alloc( &req ); 
    1832 
    1833         if( tx_buf == NULL )
    1834         {
    1835             // release socket lock
    1836             remote_queuelock_release( socket_lock_xp );
    1837                    
    1838             printk("\n[ERROR] in %s : no memory for tx_buf / socket[%x,%d] / thread[%x,%x]\n",
    1839             __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    1840             return -1;
    1841         }
    1842 
    1843         // copy data from user u_buf to kernel tx_buf   
    1844         hal_copy_from_uspace( XPTR( local_cxy , tx_buf ),
     1945        // get tx_buf pointer from socket pointer
     1946        tx_buf = (uint8_t*)hal_remote_lpt( XPTR( file_cxy , &socket_ptr->tx_buf ));
     1947
     1948        // copy data from user u_buf to kernel socket tx_buf   
     1949        hal_copy_from_uspace( XPTR( file_cxy , tx_buf ),
    18451950                              u_buf,
    18461951                              length );
     1952#if DEBUG_SOCKET_SEND
     1953if (DEBUG_SOCKET_SEND < cycle )
     1954printk("\n[%s] thread[%x,%x] / socket[%x,%d] copied %d bytes to tx_buf (%x,%x)\n",
     1955__FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, file_cxy, tx_buf );
     1956putb("tx_buf : 16 first data bytes" , tx_buf , 16 );
     1957#endif
    18471958
    18481959        // register command in socket descriptor
     
    18521963        hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_len    ) , length );
    18531964        hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_todo   ) , length );
     1965        hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_ack    ) , 0 );
    18541966        hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_valid  ) , true );
    18551967
     
    18691981        thread_unblock( server_xp , THREAD_BLOCKED_CLIENT );
    18701982
    1871         // start retransmission timer
    1872         socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT );
     1983        // start retransmission timer for TCP socket
     1984        if( socket_type == SOCK_STREAM ) 
     1985        {
     1986            alarm_start( client_xp,
     1987                         hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,
     1988                         &socket_alarm_handler,
     1989                         socket_xp );
     1990        }
    18731991
    18741992#if DEBUG_SOCKET_SEND   
    1875 cycle = (uint32_t)hal_get_cycles();
    18761993if( DEBUG_SOCKET_SEND < cycle )
    1877 printk("\n[%s] thread[%x,%x] socket[%x,%d] register SEND => blocks on <IO> / cycle %d\n",
    1878 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
     1994printk("\n[%s] thread[%x,%x] / socket[%x,%d] registers SEND => blocks on <IO>\n",
     1995__FUNCTION__, process->pid, this->trdid, process->pid, fdid );
    18791996#endif
    18801997        // client thread blocks itself and deschedules
     
    18852002cycle = (uint32_t)hal_get_cycles();
    18862003if( DEBUG_SOCKET_SEND < cycle )
    1887 printk("\n[%s] thread[%x,%x] socket[%x,%d] for SEND resumes / cycle %d\n",
     2004printk("\n[%s] thread[%x,%x] / socket[%x,%d] resumes for SEND / cycle %d\n",
    18882005__FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
    18892006#endif
    1890         // stop retransmission timer
    1891         socket_alarm_stop();
    1892 
    1893         // take socket lock
     2007        // stop retransmission timer for TCP socket
     2008        if( socket_type == SOCK_STREAM )
     2009        {
     2010            alarm_stop( client_xp );
     2011        }
     2012
     2013        // take socket lock
    18942014        remote_queuelock_acquire( socket_lock_xp );
    18952015     
     
    18972017        tx_todo    = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_todo ));
    18982018        cmd_valid  = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid ));
    1899         cmd_status = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_sts ));
     2019        cmd_sts    = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_sts ));
    19002020
    19012021        // reset tx_client in socket descriptor
     
    19062026     
    19072027// check SEND command completed when TX client thread resumes
    1908 assert( __FUNCTION__, (((tx_todo == 0) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)),
    1909 "illegal socket state when client thread resumes after TX_SEND\n"
    1910 " tx_todo = %d / tx_status = %d / tx_valid = %d\n",
    1911 tx_todo, cmd_status, cmd_valid );
    1912 
    1913         // release the tx_buf
    1914         req.ptr = tx_buf;
    1915         kmem_free( &req );
    1916 
    1917         if( cmd_status != CMD_STS_SUCCESS )
     2028assert( __FUNCTION__,
     2029(((tx_todo == 0) || (cmd_sts != CMD_STS_SUCCESS)) && (cmd_valid == false)),
     2030"client thread resumes from SEND / bad state : tx_todo %d / tx_sts %d / tx_valid %d",
     2031tx_todo, cmd_sts, cmd_valid );
     2032
     2033        if( cmd_sts != CMD_STS_SUCCESS )
    19182034        {
    19192035
    1920 #if DEBUG_SOCKET_SEND
    1921 cycle = (uint32_t)hal_get_cycles();
    1922 if( DEBUG_SOCKET_RECV < cycle )
    1923 printk("\n[%s] error %s for TX_SEND / socket[%x,%d] / thread[%x,%x]\n",
    1924 __FUNCTION__, socket_cmd_sts_str(cmd_status), process->pid, fdid, process->pid, this->trdid );
     2036#if DEBUG_SOCKET_ERROR   
      2037printk("\n[ERROR] in %s : error reported for SEND / socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     2038__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
    19252039#endif
    19262040            return -1;
     
    19322046cycle = (uint32_t)hal_get_cycles();
    19332047if (DEBUG_SOCKET_SEND < cycle )
    1934 printk("\n[%s] thread[%x,%x] success for SEND / socket[%x,%d] / length %d / cycle %d\n",
     2048printk("\n[%s] thread[%x,%x] SEND success / socket[%x,%d] / bytes %d / cycle %d\n",
    19352049__FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle );
    19362050#endif
     
    19402054    }  // end SEND command
    19412055
    1942     ////
    1943     else                                 // RECV command
     2056    /////////////////////////////////////////////////////////////
     2057    else                                       // RECV command
    19442058    {
    19452059
    19462060#if DEBUG_SOCKET_RECV
    1947 uint32_t    cycle = (uint32_t)hal_get_cycles();
    1948 if (DEBUG_SOCKET_SEND < cycle )
    1949 printk("\n[%s] thread[%x,%x] received RECV command for socket[%x,%d] / length %d / cycle %d\n",
     2061if (DEBUG_SOCKET_RECV < cycle )
     2062printk("\n[%s] thread[%x,%x] / socket[%x,%d] get RECV / length %d / cycle %d\n",
    19502063__FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle );
    19512064#endif
    19522065        // check no previous RX command
    1953         if( (hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid )) == true) ||
    1954             (hal_remote_l64( XPTR( file_cxy , &socket_ptr->rx_client)) != XPTR_NULL) )
     2066        if( hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid )) == true )
    19552067        {
    1956             // release socket lock
     2068                   
     2069#if DEBUG_SOCKET_ERROR   
     2070printk("\n[ERROR] in %s : previous RX command on socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     2071__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     2072#endif
    19572073            remote_queuelock_release( socket_lock_xp );
    1958                    
    1959             printk("\n[ERROR] in %s : previous RX command on socket[%x,%d] / thread[%x,%x]\n",
    1960             __FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    19612074            return -1;
    19622075        }
     
    19692082                   
    19702083#if DEBUG_SOCKET_RECV 
    1971 uint32_t cycle = (uint32_t)hal_get_cycles();
     2084cycle = (uint32_t)hal_get_cycles();
    19722085if( DEBUG_SOCKET_RECV < cycle )
    1973 printk("\n[%s] thread[%x,%x] socket[%x,%d] TCP connection closed / cycle %d\n",
     2086printk("\n[%s] thread[%x,%x] / socket[%x,%d] TCP connection closed / cycle %d\n",
    19742087__FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
    19752088#endif
    19762089            return 0;
    19772090        }
    1978         // build extended pointer on socket.rx_buf
    1979         xptr_t rx_buf_xp   = XPTR( file_cxy , &socket_ptr->rx_buf );
    1980 
    1981         // get rx_buf status
    1982         buf_status = remote_buf_status( rx_buf_xp );
    1983 
    1984         if( buf_status == 0 )
     2091
     2092        // build extended pointer on socket rx_buf
     2093        rx_buf_xp = XPTR( file_cxy , &socket_ptr->rx_buf );
     2094
     2095        // get socket rx_buf status
     2096        rx_buf_sts = remote_buf_status( rx_buf_xp );
     2097
     2098        // register RECV command and deschedule when rx_buf empty
     2099        if( rx_buf_sts == 0 )
    19852100        {
    19862101            // registers RX_RECV command in socket descriptor
     
    19932108
    19942109#if DEBUG_SOCKET_RECV 
    1995 uint32_t cycle = (uint32_t)hal_get_cycles();
    19962110if( DEBUG_SOCKET_RECV < cycle )
    1997 printk("\n[%s] thread[%x,%x] socket[%x,%d] rx_buf empty => blocks on <IO> / cycle %d\n",
    1998 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
     2111printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV : rx_buf empty => blocks on <IO>\n",
     2112__FUNCTION__, process->pid, this->trdid, process->pid, fdid );
    19992113#endif
    20002114            // client thread blocks itself and deschedules
     
    20052119cycle = (uint32_t)hal_get_cycles();
    20062120if( DEBUG_SOCKET_RECV < cycle )
    2007 printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV resumes / cycle %d\n",
     2121printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV : resumes / cycle %d\n",
    20082122__FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle );
    20092123#endif
     
    20112125            remote_queuelock_acquire( socket_lock_xp );
    20122126
    2013             // get rx_sts and rx_buf status
     2127            // get command status, command valid, and rx_buf status
    20142128            cmd_valid  = hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid ));
    2015             cmd_status = hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_sts ));
    2016             buf_status = remote_buf_status( rx_buf_xp );
     2129            cmd_sts    = hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_sts ));
     2130            rx_buf_sts = remote_buf_status( rx_buf_xp );
    20172131       
    2018 assert( __FUNCTION__, (((buf_status != 0) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)),
    2019 "illegal socket state when client thread resumes after RX_RECV\n"
    2020 " buf_status = %d / rx_sts = %d / rx_valid = %d\n",
    2021 buf_status , cmd_status , cmd_valid );
    2022 
    2023             // reset rx_client in socket descriptor
    2024             hal_remote_s64( XPTR( file_cxy , &socket_ptr->rx_client  ) , XPTR_NULL );
    2025 
    2026             // reset rx_buf for an UDP socket
    2027             if( socket_type == SOCK_DGRAM ) remote_buf_reset( rx_buf_xp );
    2028 
    2029             // release socket lock
    2030             remote_queuelock_release( socket_lock_xp );
    2031 
    2032             if( cmd_status == CMD_STS_EOF )           // EOF (remote close) reported
     2132assert( __FUNCTION__, (cmd_valid == false),
     2133"client thread resumes from RECV but rx_valid is true" );
     2134
     2135            if( cmd_sts == CMD_STS_EOF )           // EOF reported by RX server
    20332136            {
    20342137
    20352138#if DEBUG_SOCKET_RECV
    2036 cycle = (uint32_t)hal_get_cycles();
    20372139if( DEBUG_SOCKET_RECV < cycle )
    2038 printk("\n[%s] EOF for RX_RECV / socket[%x,%d] / thread[%x,%x]\n",
     2140printk("\n[%s] EOF received for socket[%x,%d] / thread[%x,%x]\n",
    20392141__FUNCTION__, process->pid, fdid, process->pid, this->trdid );
    20402142#endif
     2143                // release socket lock
     2144                remote_queuelock_release( socket_lock_xp );
     2145
    20412146                return 0;
    20422147            }
    2043             else if( cmd_status != CMD_STS_SUCCESS )   // other error reported
     2148            else if( cmd_sts != CMD_STS_SUCCESS )   // error reported by RX server
    20442149            {
    20452150
    2046 #if DEBUG_SOCKET_RECV
    2047 cycle = (uint32_t)hal_get_cycles();
    2048 if( DEBUG_SOCKET_RECV < cycle )
    2049 printk("\n[%s] error %s for RX_RECV / socket[%x,%d] / thread[%x,%x]\n",
    2050 __FUNCTION__, socket_cmd_sts_str(cmd_status), process->pid, fdid, process->pid, this->trdid );
    2051 #endif
     2151#if DEBUG_SOCKET_ERROR
      2152printk("\n[ERROR] in %s : error reported by RX server for socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     2153__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     2154#endif
     2155                // release socket lock
     2156                remote_queuelock_release( socket_lock_xp );
     2157
    20522158                return -1;
    20532159            }
    2054 
      2160            else if( rx_buf_sts == 0 )              // abnormally empty rx_buf
     2161            {
     2162
     2163#if DEBUG_SOCKET_ERROR
     2164printk("\n[ERROR] in %s : rx_buf empty for socket[%x,%d] / thread[%x,%x] / cycle %d\n",
     2165__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle );
     2166#endif
     2167                // release socket lock
     2168                remote_queuelock_release( socket_lock_xp );
     2169
     2170                return -1;
     2171            }
    20552172        }
    20562173
    20572174        // number of bytes extracted from rx_buf cannot be larger than u_buf size
    2058         moved_bytes = ( length < buf_status ) ? length : buf_status;
     2175        moved_bytes = ( length < rx_buf_sts ) ? length : rx_buf_sts;
    20592176
    20602177        // move data from kernel rx_buf to user u_buf
     
    20622179                                u_buf,
    20632180                                moved_bytes );
    2064 #if DEBUG_SOCKET_SEND
    2065 cycle = (uint32_t)hal_get_cycles();
    2066 if (DEBUG_SOCKET_SEND < cycle )
    2067 printk("\n[%s] thread[%x,%x] success for RECV / socket[%x,%d] / length %d / cycle %d\n",
     2181#if DEBUG_SOCKET_RECV
     2182cycle = (uint32_t)hal_get_cycles();
     2183if (DEBUG_SOCKET_RECV < cycle )
     2184printk("\n[%s] thread[%x,%x] : RECV success / socket[%x,%d] / bytes %d / cycle %d\n",
    20682185__FUNCTION__, process->pid, this->trdid, process->pid, fdid, moved_bytes, cycle );
    20692186#endif
     2187        // release socket lock
     2188        remote_queuelock_release( socket_lock_xp );
     2189
    20702190        return moved_bytes;
    20712191
     
    20952215} // end socket_recv()
    20962216
     2217////////////////////////////////////
     2218int socket_sendto( uint32_t    fdid,
     2219                   uint8_t   * u_buf,
     2220                   uint32_t    length,
     2221                   uint32_t    remote_ip,
     2222                   uint16_t    remote_port )
     2223{
     2224    printk("\n[ERROR] in %s : this function is not implemented yet\n",
     2225    __FUNCTION__, fdid, u_buf, length, remote_ip, remote_port );
     2226    return -1;
     2227
     2228}  // end socket_sendto()
     2229
     2230//////////////////////////////////////
     2231int socket_recvfrom( uint32_t    fdid,
     2232                     uint8_t   * u_buf,
     2233                     uint32_t    length,
     2234                     uint32_t    remote_ip,
     2235                     uint16_t    remote_port )
     2236{
     2237    printk("\n[ERROR] in %s : this function is not implemented yet\n",
     2238    __FUNCTION__, fdid, u_buf, length, remote_ip, remote_port );
     2239    return -1;
     2240
     2241} // end socket_recvfrom()
     2242
    20972243////////////////////////////////////////////
    20982244void socket_display( xptr_t       socket_xp,
    2099                      const char * func_str )
    2100 {
     2245                     const char * func_str,
     2246                     const char * string )
     2247{
     2248    uint32_t   cycle = (uint32_t)hal_get_cycles();
     2249
    21012250    socket_t * socket = GET_PTR( socket_xp );
    21022251    cxy_t      cxy    = GET_CXY( socket_xp );
     
    21112260    uint32_t   remote_port = hal_remote_l32( XPTR( cxy , &socket->remote_port ));
    21122261    uint32_t   tx_valid    = hal_remote_l32( XPTR( cxy , &socket->tx_valid ));
     2262    xptr_t     tx_client   = hal_remote_l64( XPTR( cxy , &socket->tx_client ));
    21132263    uint32_t   tx_cmd      = hal_remote_l32( XPTR( cxy , &socket->tx_cmd ));
    21142264    uint32_t   tx_sts      = hal_remote_l32( XPTR( cxy , &socket->tx_sts ));
     
    21182268    uint32_t   tx_nxt      = hal_remote_l32( XPTR( cxy , &socket->tx_nxt ));         
    21192269    uint32_t   tx_wnd      = hal_remote_l32( XPTR( cxy , &socket->tx_wnd ));         
     2270    uint32_t   tx_ack      = hal_remote_l32( XPTR( cxy , &socket->tx_ack ));         
    21202271    uint32_t   rx_valid    = hal_remote_l32( XPTR( cxy , &socket->rx_valid ));
     2272    xptr_t     rx_client   = hal_remote_l64( XPTR( cxy , &socket->rx_client ));
    21212273    uint32_t   rx_cmd      = hal_remote_l32( XPTR( cxy , &socket->rx_cmd ));
    21222274    uint32_t   rx_sts      = hal_remote_l32( XPTR( cxy , &socket->rx_sts ));
    21232275    uint32_t   rx_nxt      = hal_remote_l32( XPTR( cxy , &socket->rx_nxt ));         
    21242276    uint32_t   rx_wnd      = hal_remote_l32( XPTR( cxy , &socket->rx_wnd ));         
    2125     uint32_t   rx_irs      = hal_remote_l32( XPTR( cxy , &socket->rx_irs ));         
    2126 
    2127     if( func_str == NULL )
    2128     {
    2129         printk("\n****** socket[%x,%d] / xptr[%x,%x]*****\n",
    2130         pid, fdid, cxy, socket );
     2277    uint32_t   rx_irs      = hal_remote_l32( XPTR( cxy , &socket->rx_irs )); 
     2278
     2279    remote_queuelock_t * lock_ptr = &socket->lock; 
     2280    uint32_t   taken       = hal_remote_l32( XPTR( cxy , &lock_ptr->taken ));
     2281
     2282    thread_t * tx_ptr = GET_PTR( tx_client );
     2283    cxy_t      tx_cxy = GET_CXY( tx_client );
      2284    trdid_t    tx_tid = (tx_client == XPTR_NULL) ? 0 : hal_remote_l32( XPTR( tx_cxy , &tx_ptr->trdid ));
     2285
     2286    thread_t * rx_ptr = GET_PTR( rx_client );
     2287    cxy_t      rx_cxy = GET_CXY( rx_client );
      2288    trdid_t    rx_tid = (rx_client == XPTR_NULL) ? 0 : hal_remote_l32( XPTR( rx_cxy , &rx_ptr->trdid ));
     2289
     2290    if( string == NULL )
     2291    {
     2292        printk("\n****** socket[%x,%d] / lock %d / in %s / cycle %d *****\n",
     2293        pid, fdid, taken, func_str, cycle );
    21312294    }
    21322295    else
    21332296    {
    2134         printk("\n***** socket[%x,%d] / xptr[%x,%x] / from %s *****\n",
    2135         pid, fdid, cxy, socket, func_str );
    2136     }
    2137     printk(" - state %s / channel %d\n"
    2138            " - local_addr %x / local_port %x\n"
    2139            " - remote_addr %x / remote_port %x\n"
    2140            " - tx_valid %d (%s) / tx_sts %d / tx_len %x / tx_todo %x\n"
    2141            " - tx_una %x / tx_nxt %x / tx_wnd %x\n"
    2142            " - rx_valid %d (%s) / rx_sts %d\n"
    2143            " - rx_nxt %x / rx_wnd %x / rx_irs %x\n",
    2144            socket_state_str(state), channel ,
    2145            local_addr, local_port,
    2146            remote_addr, remote_port,
    2147            tx_valid, socket_cmd_type_str(tx_cmd), tx_sts, tx_len, tx_todo,
    2148            tx_una, tx_nxt, tx_wnd,
    2149            rx_valid, socket_cmd_type_str(rx_cmd), rx_sts,
    2150            rx_nxt, rx_wnd, rx_irs );
     2297        printk("\n***** socket[%x,%d] / lock %d / in %s %s / cycle %d *****\n",
     2298        pid, fdid, taken, func_str, string, cycle );
     2299    }
     2300    printk(" - state %s / channel %d / local [%x,%x] / remote[%x,%x]\n"
     2301           " - tx : valid %d / client [%x,%x] / cmd %s \n"
     2302           "        sts %d / len %x / todo %x / ack %x / una %x / nxt %x / wnd %x\n"
     2303           " - rx : valid %d / client [%x,%x] / cmd %s\n"
     2304           "        sts %d / nxt %x / wnd %x / irs %x\n",
     2305           socket_state_str(state), channel,
     2306           local_addr, local_port, remote_addr, remote_port,
     2307           tx_valid, pid, tx_tid, socket_cmd_type_str(tx_cmd),
     2308           tx_sts, tx_len, tx_todo, tx_ack, tx_una, tx_nxt, tx_wnd,
     2309           rx_valid, pid, rx_tid, socket_cmd_type_str(rx_cmd),
     2310           rx_sts, rx_nxt, rx_wnd, rx_irs );
    21512311
    21522312}  // end socket_display()
  • trunk/kernel/kern/ksocket.h

    r669 r683  
    11/*
    2  * ksocket.h - kernel socket descriptor and API definition.
     2 * ksocket.h - kernel socket definition.
    33 *
    44 * Authors  Alain Greiner    (2016,2017,2018,2019,2020)
     
    4040 * existing sockets is split in as many subsets as the number of NIC channels, in order
    4141 * to parallelize the transfers. The distribution key defining the channel index
    42  * is computed from the (remote_addr/remote_port) couple: by the NIC hardware for the
    43  * RX packets; by the software for the TX packets, using a dedicated NIC driver function.
     42 * is computed from the (remote_addr/remote_port) couple (by the NIC hardware for the
     43 * RX packets; by the software for the TX packets) using a dedicated NIC driver function.
    4444 * All sockets that have the same key share the same channel, and each socket is
    4545 * therefore linked to two chdevs : NIC_TX[key] & NIC_RX[key].
     
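The channel selection described above can be pictured with a small sketch. The hash below is only an illustration of the idea that both directions of one connection map to the same channel; the actual key is computed by the NIC hardware for RX packets and by a dedicated NIC driver function for TX packets, neither of which appears in this changeset.

#include <stdint.h>

// Illustration only : fold the (remote_addr, remote_port) couple into a
// channel index in [0, nb_channels). This is NOT the driver algorithm,
// just a sketch showing that the key depends only on the remote end-point,
// so RX and TX traffic of one connection land on the same channel.
static inline uint32_t sketch_channel_index( uint32_t remote_addr,
                                             uint16_t remote_port,
                                             uint32_t nb_channels )
{
    uint32_t key = remote_addr ^ (uint32_t)remote_port;
    return key % nb_channels;
}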
    5252 *   to the associated TX server (mainly used to handle the TCP ACKs).
    5353 * - the kernel "crq" buffer allows to store concurrent remote client connect requests
    54  *   to a local server socket. It is allocated in socket.
     54 *   to a local server socket.
    5555 *
    5656 * The synchronisation mechanism between the client threads and the server threads
    5757 * is different for the TX and RX directions:
    5858 *
    59  * 1) TX stream
     59 * 1) TX direction (sent packets)
    6060 *
    6161 * - The internal API between the TX client thread and the NIC_TX server thread defines
    6262 *   four command types, stored in the "tx_cmd" variable of the socket descriptor:
    63  *   . SOCKET_TX_CONNECT : TCP client request to start the 3 steps connection handshake.
    64  *   . SOCKET_TX_ACCEPT  : TCP server request to accept one pending connection request.
     63 *   . SOCKET_TX_CONNECT : request to start the connection handshake (TCP client only).
     64 *   . SOCKET_TX_ACCEPT  : request to accept one connection request (TCP server only).
    6565 *   . SOCKET_TX_SEND    : local (UDP/TCP) request to send data to a remote (UDP/TCP).
     6666 *   . SOCKET_TX_CLOSE   : local TCP socket requests remote TCP socket to close connection.
     
     6969 *   resets the "tx_error" field, and registers itself in the "tx_client" field.
    7070 *   Then, it unblocks the TX server thread from the BLOCKED_CLIENT condition, blocks itself
    71  *   on the BLOCKED_IO condition, and deschedules. For a SEND, the "tx_buf" kernel buffer
    72  *   is dynamicaly allocated by the client thread, that copies the payload from the user
    73  *   buffer to this kernel buffer, that is used as retransmission buffer, when required.
     71 *   on the BLOCKED_IO condition, and deschedules. For a SEND, the client thread copies
     72 *   the payload contained in the "u_buf" user buffer to the socket "tx_buf" kernel buffer
      73 *   that is used as a retransmission buffer, when required.
    7474 * - A command is valid for the TX server when the socket descriptor "tx_valid" is true.
    75  *   For a SEND command, the "tx_valid" is reset by the NIC_TX server when the last byte has
    76  *   been sent, but the TX client thread is unblocked by the NIC_RX server thread only when
    77  *   the last byte has been acknowledged, or to report an error.
     75 *   For a SEND command, the "tx_valid" is reset by the NIC_TX server thread when the last
     76 *   byte has been sent, but the TX client thread is unblocked by the NIC_RX server thread
     77 *   only when the last byte has been acknowledged, or to report an error.
    7878 *   For the CONNECT, ACCEPT and CLOSE commands, the "tx_valid" is reset by the NIC_TX server
    7979 *   when the first segment of the handshake has been sent, but the TX client thread is
     
     8888 *   When "tx_valid" or "r2t_valid" is true, the TX server thread builds and sends an UDP
    8989 *   packet or TCP segment. A single SEND command can require a large number of TCP
    90  *   segments to move a big data buffer.
     90 *   segments to move a big data buffer, before unblocking the client thread.
     9191 *   This TX server thread blocks and deschedules on the BLOCKED_ISR condition when
     9292 *   the NIC_RX queue is full. It is unblocked by the hardware NIC_TX_ISR.
    93  * - In order to detect and report error for multiple simultaneous TX accesses to the same
    94  *   socket, the client thread makes a double check before posting a new TX command :
      93 *   As multiple simultaneous TX accesses to the same socket are forbidden, the client
     94 *   thread makes a double check before posting a new TX command :
    9595 *   the "tx_valid" field must be false, and the "tx_client" field must be XPTR_NULL.
    9696 *   The "tx_valid" field is reset by the TX server thread, and the "tx_client"
     
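As a reading aid, here is a condensed sketch of the client side of this "tx_valid" handshake, using only the socket fields and kernel calls that appear in the ksocket.c hunks above; the tx_cmd constant name and the thread blocking primitives are abbreviated to comments because they are not fully visible in this changeset.

// Sketch only : posting a TX command (here a SEND) on socket <file_cxy,socket_ptr>,
// on behalf of the client thread identified by <client_xp>.
static void sketch_post_tx_send( cxy_t      file_cxy,
                                 socket_t * socket_ptr,
                                 xptr_t     client_xp,
                                 xptr_t     socket_xp,
                                 uint32_t   length )
{
    // register the command arguments in the socket descriptor
    // (the tx_cmd field is also set ; the constant name is not shown in this diff)
    hal_remote_s64( XPTR( file_cxy , &socket_ptr->tx_client ) , client_xp );
    hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_len    ) , length );
    hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_todo   ) , length );
    hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_ack    ) , 0 );
    hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_valid  ) , true );

    // arm the retransmission timer (TCP sockets only in ksocket.c)
    alarm_start( client_xp,
                 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT,
                 &socket_alarm_handler,
                 socket_xp );

    // ... unblock the NIC_TX server thread (THREAD_BLOCKED_CLIENT), then
    // ... block on THREAD_BLOCKED_IO and deschedule ; the client is unblocked
    // ... by the NIC_RX server when the last byte has been acknowledged.
}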
    136136 * 3) R2T queue
    137137 *
    138  * To implement the TCP "3 steps handshake" protocol for connection or to send RST,
    139  * the RX server thread can directly request the associated TX server thread to send
    140  * control packets in  the TX stream, using a dedicate R2T (RX to TX) FIFO stored in
    141  * the socket descriptor. Each R2T request occupy one byte in this R2T queue.
     138 * The RX server thread can directly request the associated TX server thread to send
     139 * control packets in  the TX stream, using a dedicate R2T (RX to TX) queue embedded in
     140 * the socket descriptor, and implemented as a remote_buf_t FIFO.
     141 * It is used for TCP acknowledge and for the TCP three-steps handshake.
      142 * Each R2T request occupies exactly one byte defining the TCP flags to be set.
    142143 *
    143144 * 4) CRQ queue
    144145 *
    145146 * The remote CONNECT requests received by a TCP socket (SYN segments) are stored in a
    146  * dedicated CRQ FIFO stored in the local socket descriptor. These requests are consumed
    147  * by the local client thread executing an ACCEPT.
    148  * Each CRQ request occupy sizeof(connect_request_t) bytes in this CRQ queue.
      147 * dedicated CRQ queue, and consumed by the local client thread executing an ACCEPT.
      148 * This CRQ queue is embedded in the local socket descriptor, and implemented as a
      149 * remote_buf_t FIFO. Each request occupies sizeof(connect_request_t) bytes in the queue.
    149150 * The connect_request_t structure containing the request arguments is defined below.
    150151 *
     
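For illustration, a minimal sketch of how one pending connection request is consumed from a CRQ queue, mirroring the socket_accept() code shown above; the argument types are assumed to be uint32_t, since the full prototype of socket_get_crq_request() is not visible in this excerpt.

// Sketch only : extract one connection request from a non-empty CRQ queue.
static error_t sketch_pop_crq_request( xptr_t crq_xp )
{
    uint32_t remote_addr;       // client IP address
    uint32_t remote_port;       // client port
    uint32_t remote_iss;        // client initial sequence number
    uint32_t remote_window;     // client receive window

    // each request occupies sizeof(connect_request_t) bytes in the FIFO
    if( remote_buf_status( crq_xp ) == 0 ) return -1;    // CRQ empty

    return socket_get_crq_request( crq_xp,
                                   &remote_addr,
                                   &remote_port,
                                   &remote_iss,
                                   &remote_window );
}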
    171172 * This enum defines the set of command status that can be returned by the NIC_RX and
    172173 * NIC_TX server threads to the TX & RX client threads.
    173  * The success must be signaled by the null value / the various failure cases are
    174  * signaled by a non-null value.
    175174 ****************************************************************************************/
    176175typedef enum socket_cmd_sts_e
     
    217216tcp_socket_state_t;
    218217
    219 /*****************************************************************************************
     218/****************************************************************************************
    220219 * This structure defines one connection request, registered in the CRQ queue.
    221  ****************************************************************************************/
     220 ***************************************************************************************/
    222221typedef struct connect_request_s
    223222{
     
    229228connect_request_t;
    230229
    231 /*****************************************************************************************
     230/****************************************************************************************
    232231 * This structure defines the socket descriptor.
    233  ****************************************************************************************/
     232 ***************************************************************************************/
    234233typedef struct socket_s
    235234{
     
    253252    uint8_t         *  tx_buf;       /*! pointer on TX data buffer in kernel space     */
    254253    uint32_t           tx_len;       /*! number of data bytes for a SEND command       */
    255     uint32_t           tx_todo;      /*! number of bytes not yet sent                  */
    256     xlist_entry_t      tx_temp;      /*! temporary list of sockets (root in TX chdev)  */
     254    uint32_t           tx_todo;      /*! number of bytes not yet sent in tx_buf        */
     255    uint32_t           tx_ack;       /*! number of bytes acknowledged in tx_buf        */
    257256
    258257    xlist_entry_t      rx_list;      /*! all sockets attached to same NIC_RX channel   */
     
    271270    uint32_t           tx_wnd;       /*! number of acceptable bytes in TX_data stream  */
    272271    uint32_t           tx_una;       /*! first unack byte in TX_data stream            */
     272
    273273    uint32_t           rx_nxt;       /*! next expected byte in RX_data stream          */
    274274    uint32_t           rx_wnd;       /*! number of acceptable bytes in RX_data stream  */
     
    319319
    320320/****************************************************************************************
    321  * This function is called by the dev_nic_rx_handle_tcp() function, executed by the
    322  * NIC_RX[channel] server thread, to register a R2T request defined by the <flags>
     321 * This blocking function is called by the dev_nic_rx_handle_tcp() function, executed by
      322 * the NIC_RX[channel] server thread, to register an R2T request defined by the <flags>
    323323 * argument in the socket R2T queue, specified by the <queue_xp> argument.
    324324 * This function unblocks the NIC_TX[channel] server thread, identified by the <channel>
     325325 * argument from the THREAD_BLOCKED_CLIENT condition.
     326 *
      327 * WARNING : It contains a waiting loop and returns only when an empty slot has been
     328 * found in the R2T queue.
    326329 ****************************************************************************************
    327330 * @ queue_xp   : [in] extended pointer on the R2T qeue descriptor.
     
    330333 ***************************************************************************************/
    331334void socket_put_r2t_request( xptr_t    queue_xp,
    332                              uint32_t  flags,
     335                             uint8_t   flags,
    333336                             uint32_t  channel );
     337
     338/****************************************************************************************
     339 * This function is called by the nic_tx_server thread to extract an R2T request
      340 * (one byte) from an R2T queue, specified by the <queue_xp> argument, into the buffer
     341 * defined by the <flags> argument.
     342 *****************************************************************************************
     343 * @ queue_xp      : [in]  extended pointer on the CRQ queue descriptor.
     344 * @ flags         : [out] buffer for TCP flags to be set.
     345 * @ return 0 if success / return -1 if queue empty.
     346 ***************************************************************************************/
     347error_t socket_get_r2t_request (xptr_t    queue_xp,
     348                                uint8_t * flags );
    334349 
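A short sketch of the two sides of this R2T exchange may help; TCP_FLAG_ACK is a placeholder constant (the real flag values live in the TCP headers, outside this excerpt), while the two calls use the prototypes declared just above.

// Sketch only : NIC_RX side posts a one-byte request asking the TX server
// to send an ACK segment on the given channel.
static void sketch_r2t_producer( xptr_t r2t_xp , uint32_t channel )
{
    socket_put_r2t_request( r2t_xp , TCP_FLAG_ACK , channel );
}

// Sketch only : NIC_TX side drains all pending requests and merges the
// TCP flags to be set in the next outgoing segment.
static uint8_t sketch_r2t_consumer( xptr_t r2t_xp )
{
    uint8_t flags;
    uint8_t merged = 0;
    while( socket_get_r2t_request( r2t_xp , &flags ) == 0 ) merged |= flags;
    return merged;
}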
    335350/****************************************************************************************
     
    339354 * by the <queue_xp> argument.
    340355 ****************************************************************************************
    341  * @ queue_xp      : [in] extended pointer on the CRQ qeue descriptor.
     356 * @ queue_xp      : [in] extended pointer on the CRQ queue descriptor.
    342357 * @ remote_addr   : [in] remote socket IP address.
    343358 * @ remote_port   : [in] remote socket port.
     
    374389 ****************************************************************************************
    375390 * @ socket_xp     : [in] extended pointer on socket descriptor.
    376  $ @ string        : [in] name of calling function.
     391 * @ func_str      : [in] name of calling function.
     392 * @ string        : [in] string defining the calling context (can be NULL)
    377393 ***************************************************************************************/
    378394void socket_display( xptr_t         socket_xp,
    379                      const char   * func_str );
     395                     const char   * func_str,
     396                     const char   * string );
    380397
    381398
     
    464481 * This blocking function contains two blocking conditions because it requests services
     465482 * to both the NIC_RX server thread and the NIC_TX server thread.
    466  * It can be split in five steps:
     483 * It is structured in five steps:
    467484 * 1) It makes several checkings on the listening socket domain, type, and state.
    468485 * 2) If the socket CRQ queue is empty, the function makes an SOCKET_RX_ACCEPT command
     
    529546 * arguments, to a connected (TCP or UDP) socket, identified by the <fdid> argument.
    530547 * The work is actually done by the NIC_TX server thread, and the synchronisation
     548 * between the client and the server threads uses the "tx_valid" set/reset flip-flop:
     549 * The client thread registers itself in the socket descriptor, registers in the queue
      550 * rooted in the NIC_TX[index] chdev, sets "tx_valid", unblocks the server thread, and
     551 * finally blocks on THREAD_BLOCKED_IO, and deschedules.
      552 * When the TX server thread completes the command (all data has been sent for a UDP
      553 * socket, or acknowledged for a TCP socket), the server thread resets "tx_valid" and
     554 * unblocks the client thread.
     555 * This function can be called by a thread running in any cluster.
      556 * WARNING : This implementation does not support several concurrent SEND commands
     557 * on the same socket, as only one TX thread can register in a given socket.
     558 ****************************************************************************************
     559 * @ fdid      : [in] file descriptor index identifying the socket.
     560 * @ u_buf     : [in] pointer on buffer containing packet in user space.
     561 * @ length    : [in] packet size in bytes.
     562 * @ return number of sent bytes if success / return -1 if failure.
     563 ***************************************************************************************/
     564int socket_send( uint32_t    fdid,
     565                 uint8_t   * u_buf,
     566                 uint32_t    length );
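The "tx_valid" handshake described above reduces to the ordering sketched below; the field and helper names (tx_valid, thread_block(), thread_unblock(), sched_yield()) are illustrative assumptions rather than the exact code:

    // client side (sketch) : post the command and wait for completion
    socket->tx_valid = true;
    thread_unblock( server_xp , THREAD_BLOCKED_CLIENT );   // wake the NIC_TX server
    thread_block( client_xp , THREAD_BLOCKED_IO );
    sched_yield( "blocked on SEND" );

    // NIC_TX server side, once all data is sent (UDP) or acknowledged (TCP)
    socket->tx_valid = false;                              // reset the flip-flop
    thread_unblock( client_xp , THREAD_BLOCKED_IO );       // reactivate the client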
     567
     568/****************************************************************************************
     569 * This blocking function implements the recv() syscall.
     570 * It is used to receive data that has been stored by the NIC_RX server thread in the
     571 * rx_buf of a connected socket, identified by the <fdid> argument. 
     572 * The synchronisation between the client and the server threads uses the "rx_valid"
     573 * set/reset flip-flop: If "rx_valid" is set, the client simply moves the available
     574 * data from the "rx_buf" to the user buffer identified by the <u_buf> and <length>
      575 * arguments, and resets the "rx_valid" flip-flop. If "rx_valid" is not set, the client
      576 * thread registers itself in the socket descriptor, registers in the clients queue rooted
     577 * in the NIC_RX[index] chdev, and finally blocks on THREAD_BLOCKED_IO, and deschedules.
      578 * The client thread is re-activated by the RX server, which sets the "rx_valid" flip-flop
      579 * as soon as data is available in the "rx_buf". The number of bytes actually transferred
      580 * can be less than the user buffer size.
      581 * This function can be called by a thread running in any cluster.
      582 * WARNING : This implementation does not support several concurrent RECV
      583 * commands on the same socket, as only one RX thread can register in a given socket.
     584 ****************************************************************************************
     585 * @ fdid        : [in] file descriptor index identifying the local socket.
     586 * @ u_buf       : [in] pointer on buffer in user space.
     587 * @ length      : [in] buffer size in bytes.
     588 * @ return number of received bytes if success / return -1 if failure.
     589 ***************************************************************************************/
     590int socket_recv( uint32_t    fdid,
     591                 uint8_t   * u_buf,
     592                 uint32_t    length );
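On the receive side the same flip-flop yields a fast path and a slow path, sketched here with the same illustrative naming conventions:

    if( socket->rx_valid )                    // fast path : data already in rx_buf
    {
        // move up to <length> bytes from rx_buf to the user buffer
        socket->rx_valid = false;             // reset the flip-flop
    }
    else                                      // slow path : wait for the NIC_RX server
    {
        thread_block( client_xp , THREAD_BLOCKED_IO );
        sched_yield( "blocked on RECV" );     // reactivated when rx_valid is set
    }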
     593
     594/****************************************************************************************
     595 * This blocking function implements the sendto() syscall.
      596 * It is used to send data stored in the user buffer, identified by the <u_buf> and <length>
     597 * to a remote process identified by the <remote_ip> and <remote_port> arguments,
     598 * through a local, unconnected (UDP) socket, identified by the <fdid> argument.
     599 * The work is actually done by the NIC_TX server thread, and the synchronisation
    531600 * between the client and the server threads uses the "tx_valid" set/reset flip-flop:
    532601 * The client thread registers itself in the socket descriptor, registers in the queue
     
    539608 * WARNING : This implementation does not support several concurrent SEND/SENDTO commands
    540609 * on the same socket, as only one TX thread can register in a given socket.
    541  ****************************************************************************************
    542  * @ fdid      : [in] file descriptor index identifying the socket.
    543  * @ u_buf     : [in] pointer on buffer containing packet in user space.
    544  * @ length    : [in] packet size in bytes.
     610 * TODO : this function is not implemented yet.
     611 ****************************************************************************************
     612 * @ fdid        : [in] file descriptor index identifying the local socket.
     613 * @ u_buf       : [in] pointer on buffer containing packet in user space.
     614 * @ length      : [in] packet size in bytes.
     615 * @ remote_ip   : [in] remote socket IP address.
     616 * @ remote_port : [in] remote socket port address.
    545617 * @ return number of sent bytes if success / return -1 if failure.
    546618 ***************************************************************************************/
    547 int socket_send( uint32_t    fdid,
    548                  uint8_t   * u_buf,
    549                  uint32_t    length );
    550 
    551 /****************************************************************************************
    552  * This blocking function implements the recv() syscall.
     619int socket_sendto( uint32_t   fdid,
     620                   uint8_t  * u_buf,
     621                   uint32_t   length,
     622                   uint32_t   remote_ip,
     623                   uint16_t   remote_port );
     624
     625/****************************************************************************************
     626 * This blocking function implements the recvfrom() syscall.
    553627 * It is used to receive data that has been stored by the NIC_RX server thread in the
    554  * rx_buf of a connected (TCP or UDP) socket, identified by the <fdid> argument.
      628 * rx_buf of an unconnected socket, identified by the <fdid> argument, from a
     629 * remote process identified by the <remote_ip> and <remote_port> arguments.
    555630 * The synchronisation between the client and the server threads uses the "rx_valid"
    556631 * set/reset flip-flop: If "rx_valid" is set, the client simply moves the available
     
    565640 * WARNING : This implementation does not support several concurrent RECV/RECVFROM
    566641 * commands on the same socket, as only one RX thread can register in a given socket.
    567  ****************************************************************************************
    568  * @ fdid      : [in] file descriptor index identifying the socket.
    569  * @ u_buf     : [in] pointer on buffer in user space.
    570  * @ length    : [in] buffer size in bytes.
     642 * TODO : this function is not implemented yet.
     643 ****************************************************************************************
     644 * @ fdid        : [in] file descriptor index identifying the local socket.
     645 * @ u_buf       : [in] pointer on buffer in user space.
     646 * @ length      : [in] buffer size in bytes.
     647 * @ remote_ip   : [in] remote socket IP address.
     648 * @ remote_port : [in] remote socket port address.
    571649 * @ return number of received bytes if success / return -1 if failure.
    572650 ***************************************************************************************/
    573 int socket_recv( uint32_t    fdid,
    574                  uint8_t   * u_buf,
    575                  uint32_t    length );
     651int socket_recvfrom( uint32_t    fdid,
     652                     uint8_t   * u_buf,
     653                     uint32_t    length,
     654                     uint32_t    remote_ip,
     655                     uint16_t    remote_port );
    576656
    577657/****************************************************************************************
  • trunk/kernel/kern/pipe.c

    r669 r683  
    22 * pipe.c - single writer, single reader pipe implementation           
    33 *
    4  * Author     Alain Greiner (2016,2017,2018,2019,2020)
     4 * Author     Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3232                      uint32_t size )
    3333{
    34     kmem_req_t     req;
    3534    remote_buf_t * buf;
    3635    pipe_t       * pipe;
     
    5554
    5655    // 3. allocate memory for pipe descriptor
    57     req.type  = KMEM_KCM;
    58     req.order = bits_log2( sizeof(pipe_t) );
    59     req.flags = AF_ZERO;
    60     pipe = kmem_remote_alloc( cxy , &req );
     56    pipe = kmem_remote_alloc( cxy , bits_log2(sizeof(pipe_t)) , AF_ZERO );
    6157
    6258    if( pipe == NULL )
     
    7672void pipe_destroy( xptr_t pipe_xp )
    7773{
    78     kmem_req_t req;
    79 
    8074    pipe_t * pipe_ptr = GET_PTR( pipe_xp );
    8175    cxy_t    pipe_cxy = GET_CXY( pipe_xp );
     
    8882
    8983    // release pipe descriptor
    90     req.type = KMEM_KCM;
    91     req.ptr  = pipe_ptr;
    92     kmem_remote_free( pipe_cxy , &req );
     84    kmem_remote_free( pipe_cxy , pipe_ptr , bits_log2(sizeof(pipe_t)) );
    9385
    9486}  // end pipe_destroy()
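This hunk illustrates the new kmem API used throughout this changeset: the kmem_req_t descriptor disappears, and the caller passes the power-of-two order and the allocation flags directly. The resulting call pattern, as used in pipe_create() / pipe_destroy():

    // allocate a zeroed pipe descriptor in cluster <cxy>
    pipe_t * pipe = kmem_remote_alloc( cxy , bits_log2(sizeof(pipe_t)) , AF_ZERO );
    if( pipe == NULL )
    {
        // handle the allocation failure
    }

    // release it later with the same order argument
    kmem_remote_free( cxy , pipe , bits_log2(sizeof(pipe_t)) );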
    95 
    9687
    9788//////////////////////////////////////////
  • trunk/kernel/kern/printk.c

    r669 r683  
    4141//////////////////////////////////////////////////////////////////////////////////////
    4242// This static function is called by printk(), nolock_printk(), and snprintk(),
    43 // functions to build a string from a printf-like format, and stores it
    44 // in the buffer defined by the <string> and <length> arguments.
    45 // It does NOT add a terminating NUL character in the <string> buffer.
    46 // If success, it returns the number of bytes actually copied in the string buffer.
      43// functions to build a string from a printf-like <format>, and store it
     44// in the buffer defined by the <string> and <size> arguments.
     45// The <format> itself is supposed to be a NUL terminated string. The <string>
      46// buffer <size> must be large enough to also contain the NUL terminating character.
     47// If success, it returns the number of bytes actually copied in the <string> buffer,
     48// but this length does NOT include the terminating NUL character.
    4749// It returns -2 in case of illegal format, and -1 if the formatted string
    48 // exceeds the length argument.
     50// exceeds the <size> argument.
    4951//////////////////////////////////////////////////////////////////////////////////////
    5052// @ string    : buffer allocated by caller.
    51 // @ length    : buffer size in bytes
     53// @ size      : buffer size in bytes
    5254// @ format    : printf like format.
    5355// @ args      : va_list of arguments.
     
    5557//////////////////////////////////////////////////////////////////////////////////////
    5658static int32_t format_to_string( char       * string,
    57                                  uint32_t     length,
     59                                 uint32_t     size,
    5860                                 const char * format,
    5961                                 va_list    * args )
    6062{
    6163
    62 #define TO_STRING(x) do { string[ps] = (x); ps++; if(ps==length) return -1; } while(0);
     64#define TO_STRING(x) do { string[ps] = (x); ps++; if(ps==size) return -1; } while(0);
    6365
    6466    uint32_t   ps = 0;        // index in string buffer
     
    7476            goto format_to_string_arguments;
    7577        }
    76         else                  // copy one char to string
     78        else                  // copy one char of format to string
    7779        {
    7880            TO_STRING( *format );
     
    8183    }
    8284
    83     TO_STRING( 0 );
    84     return ps;
     85    TO_STRING( 0 );      // NUL character written in buffer
     86    return (ps - 1);     // but not counted in length
    8587
    8688format_to_string_arguments:
     
    9597        switch (*format)
    9698        {
    97             case ('c'):             // char conversion
    98             {
    99                 int val = va_arg( *args , int );
    100                 buf[0] = (char)val;
     99            case ('c'):             // one printable character
     100            {
     101                buf[0] = (char)va_arg( *args , uint32_t );
    101102                pbuf   = buf;
    102103                len    = 1;
    103104                break;
    104105            }
    105             case ('d'):             // up to 10 digits decimal signed integer
     106            case ('b'):             // one ASCII code value (2 hexadecimal digits)
     107            {
     108                uint8_t val = (uint8_t)va_arg( *args , uint32_t );
     109                buf[1] = HexaTab[val & 0xF];
     110                buf[0] = HexaTab[(val >> 4) & 0xF];
     111                pbuf   = buf;
     112                len    = 2;
     113                break;
     114            }           
     115            case ('d'):            // one int32_t (up to 10 decimal digits after sign)
    106116            {
    107117                int32_t val = va_arg( *args , int32_t );
     
    120130                break;
    121131            }
    122             case ('u'):             // up to 10 digits decimal unsigned integer
     132            case ('u'):           // one uint32_t (up to 10 decimal digits)
    123133            {
    124134                uint32_t val = va_arg( *args , uint32_t );
     
    132142                break;
    133143            }
    134             case ('x'):             // up to 8 digits hexad after "0x"
    135             case ('X'):             // exactly 8 digits hexa after "0x"
     144            case ('x'):           // one uint32_t (up to 8 hexa digits after "0x")
     145
    136146            {
    137147                uint32_t val = va_arg( *args , uint32_t );
     
    141151                {
    142152                    buf[7 - i] = HexaTab[val & 0xF];
    143                     if( (*format == 'x') && ((val >> 4) == 0) )  break;
    144153                    val = val >> 4;
     154                    if(val == 0)  break;
    145155                }
    146156                len =  i + 1;
     
    148158                break;
    149159            }
    150             case ('l'):             // up to 16 digits hexa after "0x"
    151             case ('L'):             // exactly 16 digits hexa after "0x"
     160            case ('X'):         // one uint32_t (exactly 8 hexa digits after "0x")
     161            {
     162                uint32_t val = va_arg( *args , uint32_t );
     163                TO_STRING( '0' );
     164                TO_STRING( 'x' );
     165                for(i = 0 ; i < 8 ; i++)
     166                {
     167                    buf[7 - i] = (val != 0) ? HexaTab[val & 0xF] : '0';
     168                    val = val >> 4;
     169                }
     170                len = 8;
     171                pbuf = &buf[0];
     172                break;
     173            }
     174            case ('l'):          // one uint64_t (up to 16 digits hexa after "0x")
    152175            {
    153176                uint64_t val = (((uint64_t)va_arg( *args, uint32_t)) << 32) |
     
    158181                {
    159182                    buf[15 - i] = HexaTab[val & 0xF];
    160                     if( (*format == 'l') && ((val >> 4) == 0) )  break;
    161183                    val = val >> 4;
     184                    if( val == 0)  break;
    162185                }
    163186                len =  i + 1;
     
    165188                break;
    166189            }
    167             case ('s'):             /* string */
     190            case ('L'):          // one uint64_t (exactly 16 digits hexa after "0x")
     191            {
     192                uint64_t val = (((uint64_t)va_arg( *args, uint32_t)) << 32) |
     193                               ((uint64_t)va_arg( *args, uint32_t));
     194                TO_STRING( '0' );
     195                TO_STRING( 'x' );
     196                for(i = 0 ; i < 16 ; i++)
     197                {
     198                    buf[15 - i] = (val != 0) ? HexaTab[val & 0xF] : '0';
     199                    val = val >> 4;
     200                }
     201                len =  16;
     202                pbuf = &buf[0];
     203                break;
     204            }
      205            case ('s'):             /* one character string */
    168206            {
    169207                char* str = va_arg( *args , char* );
     
    213251    // build a string from format
    214252    length = format_to_string( buffer,
    215                       CONFIG_PRINTK_BUF_SIZE,
    216                       format,
    217                       &args );
     253                               CONFIG_PRINTK_BUF_SIZE,
     254                               format,
     255                               &args );
    218256    va_end( args );
    219257
     
    258296    // build a string from format
    259297    length = format_to_string( buffer,
    260                       CONFIG_PRINTK_BUF_SIZE,
    261                       format,
    262                       &args );
     298                               CONFIG_PRINTK_BUF_SIZE,
     299                               format,
     300                               &args );
    263301    va_end( args );
    264302
     
    315353        if( length > 0  )  // display panic message on TXT0, including formatted string
    316354        {
    317             printk("\n[ASSERT] in %s / core[%x,%d] / thread[%x,%x] / cycle %d\n       %s\n",
     355            printk("\n[ASSERT] in %s / core[%x,%d] / thread[%x,%x] / cycle %d\n   <%s>\n",
    318356            func_name, local_cxy, lid, pid, trdid, cycle, buffer );
    319357        }
     
    332370{
    333371    va_list       args;
    334     int32_t       string_length;
     372    int32_t       length;
    335373
    336374    // build args va_list
     
    338376
    339377    // build a string from format
    340     string_length = format_to_string( buffer , size , format , &args );
     378    length = format_to_string( buffer , size , format , &args );
     379
     380    // release args list
    341381    va_end( args );
    342382
    343     if( (string_length < 0) || (string_length == (int32_t)size) )  // failure
    344     {
    345         return -1;
    346     }
    347     else                                                           // success
    348     {
    349         // add NUL character
    350         buffer[string_length] = 0;
    351 
    352         return string_length;
    353     }
     383    if( length < 0 )   return -1;
     384    else               return length;
     385
    354386}   // end snprintk()
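To make the revised semantics concrete (the NUL is written in the buffer but not counted in the returned length), a small example using the new %b and the fixed-width %X conversions:

    char buf[32];
    int  len;

    // %b prints one byte as exactly 2 hexa digits (no "0x" prefix),
    // %X prints a 32-bit value as exactly 8 hexa digits after "0x".
    len = snprintk( buf , 32 , "val = %b / %X" , 0x05 , 0x1234 );

    // buf contains "val = 05 / 0x00001234" and len == 21 :
    // the terminating NUL is stored in buf but not included in len.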
    355387
  • trunk/kernel/kern/printk.h

    r669 r683  
    2424///////////////////////////////////////////////////////////////////////////////////
    2525// The printk.c and printk.h files define the functions used by the kernel
    26 // to display messages on the kernel terminal TXT0, using a busy waiting policy.
    27 // It calls synchronously the TXT0 driver, without descheduling.
      26// to build formatted strings, or display them on the TXT0 terminal.
      27// In case of display, it synchronously calls the TXT0 driver, without descheduling.
    2828//
    29 // For the formated string, the supported formats are defined below :
     29// The supported formats are defined below :
    3030//   %c : single ascii character (8 bits)
     31//   %b : exactly 2 hexadecimal digits (8 bits)
    3132//   %d : up to 10 digits decimal integer (32 bits)
    3233//   %u : up to 10 digits unsigned decimal (32 bits)
     
    4748
    4849/**********************************************************************************
    49  * These debug functions display a formated string defined by the <format,...>
      50 * These functions display a formatted string defined by the <format,...>
    5051 * argument on the kernel terminal TXT0, with or without taking the TXT0 lock.
    5152 **********************************************************************************
     
    6465
    6566/**********************************************************************************
    66  * This debug function displays a [ASSERT] message on kernel TXT0 terminal
     67 * This function displays an [ASSERT] message on kernel TXT0 terminal
    6768 * if Boolean expression <expr> is false. It prints a detailed message including:
    6869 * - the calling core [cxy,lpid]
     
    8384 * This function builds a formatted string in a buffer defined by the <buffer>
    8485 * and <buf_size> arguments, from the format defined by the <format,...> argument.
    85  * This function set the NUL terminating character in target <buffer>.
      86 * This function sets the NUL terminating character in the target <buffer>,
     87 * but the returned length does not include this NUL character.
    8688 **********************************************************************************
    8789 * @ buffer     : pointer on target buffer (allocated by caller).
     
    142144 * @ string   : buffer name or identifier.
    143145 * @ buffer   : local pointer on bytes array.
    144  * @ size     : number of bytes bytes to display.
     146 * @ size     : number of bytes to display.
    145147 *********************************************************************************/
    146148void putb( char     * string,
  • trunk/kernel/kern/process.c

    r669 r683  
    22 * process.c - process related functions definition.
    33 *
    4  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     4 * Authors  Ghassan Almaless       (2008,2009,2010,2011,2012)
    55 *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016,2017,2018,2019,2020)
     6 *          Alain Greiner          (2016,2017,2018,2019,2020)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    7070//////////////////////////////////////////////////////////////////////////////////////////
    7171
    72 /////////////////////////////////
    73 process_t * process_alloc( void )
    74 {
    75 
    76 assert( __FUNCTION__, (sizeof(process_t) < CONFIG_PPM_PAGE_SIZE),
    77 "process descriptor exceeds 1 page" );
    78 
    79         kmem_req_t req;
    80 
    81     req.type  = KMEM_PPM;
    82         req.order = 0;
    83         req.flags = AF_KERNEL | AF_ZERO;
    84     return kmem_alloc( &req );
    85 }
    86 
    87 ////////////////////////////////////////
    88 void process_free( process_t * process )
    89 {
    90     kmem_req_t  req;
    91 
    92         req.type = KMEM_PPM;
    93         req.ptr  = process;
    94         kmem_free( &req );
    95 }
    96 
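The dedicated process_alloc() / process_free() wrappers are removed; the rest of the file now calls the generic allocator directly. The replacement pattern, as used below in process_get_local_copy() and process_make_fork():

    // allocate a zeroed process descriptor in the local cluster
    process_t * process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
    if( process == NULL )
    {
        // report the error and return
    }

    // release it with the same order argument
    kmem_free( process , bits_log2(sizeof(process_t)) );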
    9772////////////////////////////////////////////////////
    9873error_t process_reference_init( process_t * process,
     
    11691    vmm_t     * vmm;
    11792
    118     // build extended pointer on this reference process
     93#if DEBUG_PROCESS_REFERENCE_INIT || DEBUG_PROCESS_ERROR
     94thread_t * this  = CURRENT_THREAD;
     95uint32_t   cycle = (uint32_t)hal_get_cycles();
     96#endif
     97
     98    // build extended pointer on reference process
    11999    process_xp = XPTR( local_cxy , process );
    120100
     
    130110
    131111#if DEBUG_PROCESS_REFERENCE_INIT
    132 thread_t * this = CURRENT_THREAD;
    133 uint32_t cycle = (uint32_t)hal_get_cycles();
    134112if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
    135113printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
     
    156134    if( error )
    157135    {
    158         printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
     136
     137#if DEBUG_PROCESS_ERROR
     138printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
     139__FUNCTION__, this->process->pid, this->trdid, cycle );
     140#endif
    159141        return -1;
    160142    }
     
    173155    if( error )
    174156    {
     157
     158#if DEBUG_PROCESS_ERROR
     159printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
     160__FUNCTION__, this->process->pid, this->trdid, cycle );
     161#endif
    175162        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
    176163        return -1;
     
    233220        if( error )
    234221        {
    235             printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
     222
     223#if DEBUG_PROCESS_ERROR
     224printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdin pseudo file / cycle %d\n",
     225__FUNCTION__, this->process->pid, this->trdid, cycle );
     226#endif
    236227            return -1;
    237228        }
     
    256247        if( error )
    257248        {
    258             printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
     249
     250#if DEBUG_PROCESS_ERROR
     251printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdout pseudo file / cycle %d\n",
     252__FUNCTION__, this->process->pid, this->trdid, cycle );
     253#endif
    259254            return -1;
    260255        }
     
    279274        if( error )
    280275        {
    281             printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
     276
     277#if DEBUG_PROCESS_ERROR
     278printk("\n[ERROR] in %s : thread[%x,%x] cannot open stderr pseudo file / cycle %d\n",
     279__FUNCTION__, this->process->pid, this->trdid, cycle );
     280#endif
    282281            return -1;
    283282        }
     
    302301
    303302        // recreate all open files from parent process fd_array to child process fd_array
    304         process_fd_replicate( process_xp , parent_xp );
     303        error = process_fd_replicate( process_xp , parent_xp );
     304
     305        if( error )
     306        {
     307
     308#if DEBUG_PROCESS_ERROR
     309printk("\n[ERROR] in %s : thread[%x,%x] cannot replicate fd_array / cycle %d\n",
     310__FUNCTION__, this->process->pid, this->trdid, cycle );
     311#endif
     312            return -1;
     313        }
     314
    305315    }
    306316
     
    379389    vmm_t   * vmm;
    380390
     391#if DEBUG_PROCESS_COPY_INIT || DEBUG_PROCESS_ERROR
     392thread_t * this = CURRENT_THREAD; 
     393uint32_t cycle = (uint32_t)hal_get_cycles();
     394#endif
     395
    381396    // get reference process cluster and local pointer
    382397    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
     
    394409
    395410#if DEBUG_PROCESS_COPY_INIT
    396 thread_t * this = CURRENT_THREAD; 
    397 uint32_t cycle = (uint32_t)hal_get_cycles();
    398411if( DEBUG_PROCESS_COPY_INIT < cycle )
    399412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
     
    410423    // create an empty GPT as required by the architecture
    411424    error = hal_gpt_create( &vmm->gpt );
     425
    412426    if( error )
    413427    {
    414         printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
     428
     429#if DEBUG_PROCESS_ERROR
     430printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
     431__FUNCTION__, this->process->pid, this->trdid, cycle );
     432#endif
    415433        return -1;
    416434    }
     
    421439    // register kernel vsegs in VMM as required by the architecture
    422440    error = hal_vmm_kernel_update( local_process );
     441
    423442    if( error )
    424443    {
    425         printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
     444
     445#if DEBUG_PROCESS_ERROR
     446printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
     447__FUNCTION__, this->process->pid, this->trdid, cycle );
     448#endif
    426449        return -1;
    427450    }
     
    431454    // initialize locks protecting GPT and VSL
    432455    error = vmm_user_init( local_process );
     456
    433457    if( error )
    434458    {
    435         printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
     459
     460#if DEBUG_PROCESS_ERROR
     461printk("\n[ERROR] in %s : thread[%x,%x] cannot register user vsegs in VMM / cycle %d\n",
     462__FUNCTION__, this->process->pid, this->trdid, cycle );
     463#endif
    436464        return -1;
    437465    }
     
    598626
    599627    // release memory allocated to process descriptor
    600     process_free( process );
     628        kmem_free( process , bits_log2(sizeof(process_t)) );
    601629
    602630#if DEBUG_PROCESS_DESTROY
     
    9741002{
    9751003    error_t        error;
    976     process_t    * process_ptr;   // local pointer on process
     1004    process_t    * process;       // local pointer on process
    9771005    xptr_t         process_xp;    // extended pointer on process
    9781006
     1007#if DEBUG_PROCESS_GET_LOCAL_COPY || DEBUG_PROCESS_ERROR
     1008thread_t * this  = CURRENT_THREAD;
     1009uint32_t   cycle = (uint32_t)hal_get_cycles();
     1010#endif
     1011
    9791012    cluster_t * cluster = LOCAL_CLUSTER;
    9801013
    9811014#if DEBUG_PROCESS_GET_LOCAL_COPY
    982 thread_t * this = CURRENT_THREAD;
    983 uint32_t cycle = (uint32_t)hal_get_cycles();
    9841015if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    9851016printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     
    9961027    {
    9971028        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
    998         process_ptr = GET_PTR( process_xp );
    999         if( process_ptr->pid == pid )
     1029        process    = GET_PTR( process_xp );
     1030        if( process->pid == pid )
    10001031        {
    10011032            found = true;
     
    10171048
    10181049        // allocate memory for local process descriptor
    1019         process_ptr = process_alloc();
    1020 
    1021         if( process_ptr == NULL )  return NULL;
     1050        process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
     1051
     1052        if( process == NULL )  return NULL;
    10221053
    10231054        // initialize local process descriptor copy
    1024         error = process_copy_init( process_ptr , ref_xp );
    1025 
    1026         if( error ) return NULL;
     1055        error = process_copy_init( process , ref_xp );
     1056
     1057        if( error )
     1058        {
     1059
     1060#if DEBUG_PROCESS_ERROR
     1061printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize local process copy / cycle %d\n",
     1062__FUNCTION__, this->process->pid, this->trdid, cycle );
     1063#endif
     1064            return NULL;
     1065        }
    10271066    }
    10281067
     
    10311070if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    10321071printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
    1033 __FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
    1034 #endif
    1035 
    1036     return process_ptr;
     1072__FUNCTION__, this->process->pid, this->trdid, local_cxy, process, cycle );
     1073#endif
     1074
     1075    return process;
    10371076
    10381077}  // end process_get_local_copy()
     
    11111150    xptr_t    max_xp;          // extended pointer on max field in fd_array
    11121151
     1152#if DEBUG_PROCESS_FD_REGISTER
     1153thread_t * this  = CURRENT_THREAD;
     1154uint32_t   cycle = (uint32_t)hal_get_cycles();
     1155#endif
     1156
    11131157    // get target process cluster and local pointer
    11141158    process_t * process_ptr = GET_PTR( process_xp );
     
    11201164
    11211165#if DEBUG_PROCESS_FD_REGISTER
    1122 thread_t * this  = CURRENT_THREAD;
    1123 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1124 pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
     1166pid_t  tgt_pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
    11251167if( DEBUG_PROCESS_FD_REGISTER < cycle )
    11261168printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
    1127 __FUNCTION__, this->process->pid, this->trdid, pid, cycle );
     1169__FUNCTION__, this->process->pid, this->trdid, tgt_pid, cycle );
    11281170#endif
    11291171
     
    11681210if( DEBUG_PROCESS_FD_REGISTER < cycle )
    11691211printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
    1170 __FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
     1212__FUNCTION__, this->process->pid, this->trdid, tgt_pid, id, cycle );
    11711213#endif
    11721214
     
    13841426}  // end process_fd_get_xptr_from_local()
    13851427
    1386 /////////////////////////////////////////
    1387 void process_fd_replicate( xptr_t dst_xp,
    1388                            xptr_t src_xp )
     1428////////////////////////////////////////////
     1429error_t process_fd_replicate( xptr_t dst_xp,
     1430                              xptr_t src_xp )
    13891431{
    13901432    uint32_t fdid;      // current file descriptor index
     
    14351477            if( error )
    14361478            {
    1437                 printk("\n[ERROR] in %s : cannot create new file\n", __FUNCTION__ );
    1438                 return;
     1479
     1480#if DEBUG_PROCESS_ERROR
     1481thread_t * this  = CURRENT_THREAD;
     1482uint32_t   cycle = (uint32_t)hal_get_cycles();
     1483printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor / cycle %d\n",
     1484__FUNCTION__, this->process->pid, this->trdid, cycle );
     1485#endif
     1486                return -1;
    14391487            }
    14401488
     
    14461494    // release lock on source process fd_array
    14471495        remote_queuelock_release( src_lock_xp );
     1496
     1497    return 0;
    14481498
    14491499}  // end process_fd_replicate()
     
    14941544    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
    14951545
    1496     printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
     1546    // get pointers on TXT0 chdev
     1547    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1548    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1549    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1550
     1551    // get extended pointer on remote TXT0 lock
     1552    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1553
     1554    // get TXT0 lock
     1555    remote_busylock_acquire( lock_xp );
     1556
     1557    nolock_printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
    14971558    pid, process_cxy, max );
    14981559
     
    15201581
    15211582                // display relevant file descriptor info
    1522                 printk(" - %d : type %s / ptr %x (%s)\n",
     1583                nolock_printk(" - %d : type %s / ptr %x (%s)\n",
    15231584                fdid, process_fd_type_str(file_type), file_ptr, name );
    15241585            }
     
    15261587            {
    15271588                // display relevant file descriptor info
    1528                 printk(" - %d : type %s / ptr %x\n",
     1589                nolock_printk(" - %d : type %s / ptr %x\n",
    15291590                fdid , process_fd_type_str(file_type), file_ptr );
    15301591            }
     
    15321593        else
    15331594        {
    1534             printk(" - %d : empty slot\n",
    1535             fdid );
     1595            nolock_printk(" - %d : empty slot\n", fdid );
    15361596        }
    15371597    }
     1598
      1599    // release TXT0 lock
      1600    remote_busylock_release( lock_xp );
     1601
    15381602}   // end process_fd_display()
    15391603
     
    15481612{
    15491613    ltid_t         ltid;
     1614    ltid_t         ltid_min;
     1615
    15501616    bool_t         found = false;
     1617    lpid_t         lpid  = LPID_FROM_PID( process->pid );
    15511618 
    15521619// check arguments
     
    15541621assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
    15551622
    1556     // get the lock protecting th_tbl for all threads
    1557     // but the idle thread executing kernel_init (cannot yield)
     1623    // get the lock protecting th_tbl for all threads but the idle thread
    15581624    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
    15591625
      1626    // compute ltid_min : 0 for a user thread / 1 for a kernel thread
     1627    ltid_min = (lpid == 0) ? 1 : 0;
     1628 
    15601629    // scan th_tbl
    1561     for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
     1630    for( ltid = ltid_min ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
    15621631    {
    15631632        if( process->th_tbl[ltid] == NULL )
     
    15811650    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
    15821651
    1583     return (found) ? 0 : 0xFFFFFFFF;
     1652    return (found) ? 0 : -1;
    15841653
    15851654}  // end process_register_thread()
     
    16471716"parent process must be the reference process" );
    16481717
    1649 #if DEBUG_PROCESS_MAKE_FORK
    1650 uint32_t   cycle;
     1718#if DEBUG_PROCESS_MAKE_FORK || DEBUG_PROCESS_ERROR
     1719uint32_t   cycle  = (uint32_t)hal_get_cycles();
    16511720thread_t * this  = CURRENT_THREAD;
    16521721trdid_t    trdid = this->trdid;
     
    16551724
    16561725#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1657 cycle   = (uint32_t)hal_get_cycles();
    16581726if( DEBUG_PROCESS_MAKE_FORK < cycle )
    16591727printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
     
    16621730
    16631731    // allocate a process descriptor
    1664     process = process_alloc();
     1732    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
    16651733
    16661734    if( process == NULL )
    16671735    {
    1668         printk("\n[ERROR] in %s : cannot get process in cluster %x\n",
    1669         __FUNCTION__, local_cxy );
     1736
     1737#if DEBUG_PROCESS_ERROR
     1738printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate process descriptor / cxy %x / cycle %d\n",
     1739__FUNCTION__, pid, trdid, local_cxy, cycle );
     1740#endif
    16701741        return -1;
    16711742    }
     
    16751746    if( error )
    16761747    {
    1677         printk("\n[ERROR] in %s : cannot get PID in cluster %x\n",
    1678         __FUNCTION__, local_cxy );
    1679         process_free( process );
     1748
     1749#if DEBUG_PROCESS_ERROR
     1750printk("\n[ERROR] in %s : thread[%x,%x] cannot get PID / cxy %x / cycle %d\n",
     1751__FUNCTION__, pid, trdid, local_cxy, cycle );
     1752#endif
     1753            kmem_free( process , bits_log2(sizeof(process_t)) );
    16801754        return -1;
    16811755    }
    16821756
    16831757#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1684 cycle = (uint32_t)hal_get_cycles();
    16851758if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1686 printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
    1687 __FUNCTION__, pid, trdid, new_pid, cycle );
     1759printk("\n[%s] thread[%x,%x] allocated child_process %x\n",
     1760__FUNCTION__, pid, trdid, new_pid );
    16881761#endif
    16891762
     
    16941767    if( error )
    16951768    {
    1696         printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n",
    1697         __FUNCTION__, local_cxy );
    1698         process_free( process );
     1769
     1770#if DEBUG_PROCESS_ERROR
     1771printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize child process / cxy %x / cycle %d\n",
     1772__FUNCTION__, pid, trdid, local_cxy, cycle );
     1773#endif
     1774        cluster_pid_release( new_pid );
     1775            kmem_free( process , bits_log2(sizeof(process_t)) );
    16991776        return -1;
    17001777    }
    17011778
    17021779#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1703 cycle = (uint32_t)hal_get_cycles();
    17041780if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1705 printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
    1706 __FUNCTION__, pid, trdid, new_pid, cycle );
     1781printk("\n[%s] thread[%x,%x] initialized child_process %x\n",
     1782__FUNCTION__, pid, trdid, new_pid );
    17071783#endif
    17081784
     
    17121788    if( error )
    17131789    {
    1714         printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n",
    1715         __FUNCTION__, local_cxy );
    1716         process_free( process );
     1790
     1791#if DEBUG_PROCESS_ERROR
     1792printk("\n[ERROR] in %s : thread[%x,%x] cannot copy VMM to child process / cxy %x / cycle %d\n",
     1793__FUNCTION__, pid, trdid, local_cxy, cycle );
     1794#endif
    17171795        cluster_pid_release( new_pid );
     1796            kmem_free( process , bits_log2(sizeof(process_t)) );
    17181797        return -1;
    17191798    }
    17201799
    17211800#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1722 cycle = (uint32_t)hal_get_cycles();
    17231801if( DEBUG_PROCESS_MAKE_FORK < cycle )
    17241802{
    1725     printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
    1726     __FUNCTION__, pid, trdid, cycle );
     1803    printk("\n[%s] thread[%x,%x] copied VMM from parent to child\n",
     1804    __FUNCTION__, pid, trdid );
    17271805    hal_vmm_display( XPTR( local_cxy , process ) , true );
    17281806}
     
    17361814
    17371815#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1738 cycle = (uint32_t)hal_get_cycles();
    17391816if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1740 printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
    1741 __FUNCTION__ , pid, trdid, new_pid, cycle );
     1817printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership\n",
     1818__FUNCTION__ , pid, trdid, new_pid );
    17421819#endif
    17431820
     
    17531830    if( error )
    17541831    {
    1755         printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
    1756         __FUNCTION__, local_cxy );
    1757         process_free( process );
     1832
     1833#if DEBUG_PROCESS_ERROR
     1834printk("\n[ERROR] in %s : thread[%x,%x] cannot create main thread / cxy %x / cycle %d\n",
     1835__FUNCTION__, pid, trdid, local_cxy, cycle );
     1836#endif
    17581837        cluster_pid_release( new_pid );
     1838            kmem_free( process , bits_log2(sizeof(process_t)) );
    17591839        return -1;
    17601840    }
     
    17651845
    17661846#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1767 cycle = (uint32_t)hal_get_cycles();
    17681847if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1769 printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n",
    1770 __FUNCTION__, pid, trdid, thread, cycle );
     1848printk("\n[%s] thread[%x,%x] created main thread %x\n",
     1849__FUNCTION__, pid, trdid, thread );
    17711850#endif
    17721851
     
    17871866 
    17881867#if( DEBUG_PROCESS_MAKE_FORK & 1 )
    1789 cycle = (uint32_t)hal_get_cycles();
    17901868if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1791 printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
    1792 __FUNCTION__, pid, trdid, cycle );
     1869printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child\n",
     1870__FUNCTION__, pid, trdid );
    17931871#endif
    17941872
     
    18191897}   // end process_make_fork()
    18201898
    1821 ////////////////////////////////////////////////i//////////////////////////////////////
    1822 // This static function is called by the thread_user_exec() function :
    1823 // - to register the main() arguments (args) in the <exec_info> structure.
    1824 // - to register the environment variables (envs) in the <exec_info> structure.
    1825 // In both cases the input is an array of NULL terminated string pointers in user
    1826 // space, and the strings can be dispatched anywhere in the user process space.
    1827 // This array of pointers is defined by the <u_pointers> argument. The empty slots
    1828 // contain the NULL value, and the N non-empty slots are indexed from 0 to (N-1).
    1829 // - The max number of envs, and the max number of args are defined by the
    1830 //   CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
    1831 // - The numbers of pages to store the (args) and (envs) strings are defined by the
    1832 //   CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters.
    1833 ///////////////////////////////////////////////////////////////////////////////////////
    1834 // Implementation note:
    1835 // It allocates a kernel buffer to store a kernel copy of both the array of pointers,
    1836 // and the strings. It set the pointers and copies the strings in this kernel buffer.
    1837 // Finally, it registers the buffer & the actual number of strings in the process
    1838 // exec_info structure  (defined in the <process.h> file).
    1839 ///////////////////////////////////////////////////////////////////////////////////////
    1840 // @ is_args     : [in]    true if called for (args) / false if called for (envs).
    1841 // @ u_pointers  : [in]    array of pointers on the strings (in user space).
    1842 // @ exec_info   : [out]   pointer on the exec_info structure.
    1843 // @ return 0 if success / non-zero if too many strings or no memory.
    1844 ///////////////////////////////////////////////////////////////////////////////////////
    1845 error_t process_exec_get_strings( bool_t         is_args,
    1846                                   char        ** u_pointers,
    1847                                   exec_info_t  * exec_info )
    1848 {
    1849     uint32_t     index;           // slot index in pointers array
    1850     uint32_t     length;          // string length (in bytes)
    1851     uint32_t     pointers_bytes;  // number of bytes to store pointers
    1852     uint32_t     max_index;       // max size of pointers array
    1853     char      ** k_pointers;      // base of kernel array of pointers
    1854     char       * k_buf_ptr;       // pointer on first empty slot in strings buffer
    1855     uint32_t     k_buf_space;     // number of bytes available in string buffer
    1856     kmem_req_t   req;             // kernel memory allocator request
    1857     char       * k_buf;           // kernel buffer for both pointers & strings
    1858 
    1859 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1860 thread_t * this  = CURRENT_THREAD;
    1861 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1862 #endif
    1863 
    1864     // Allocate one block of physical memory for both the pointers and the strings
    1865     // as defined by the CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE parameters
    1866     // - the array of pointers is stored in the first bytes of the kernel buffer
    1867     // - the strings themselve are stored in the next bytes of this buffer
    1868     // Set the k_pointers, k_buf_ptr, k_buf_space, and max_index
    1869 
    1870     if( is_args )
    1871     {
    1872         req.type   = KMEM_PPM;
    1873         req.order  = bits_log2( CONFIG_VMM_ARGS_SIZE );
    1874         req.flags  = AF_KERNEL | AF_ZERO;
    1875         k_buf      = kmem_alloc( &req );
    1876 
    1877         pointers_bytes = CONFIG_PROCESS_ARGS_MAX_NR * sizeof(char *);
    1878         k_pointers     = (char **)k_buf;
    1879         k_buf_ptr      = k_buf + pointers_bytes;
    1880         k_buf_space    = (CONFIG_VMM_ARGS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
    1881         max_index      = CONFIG_PROCESS_ARGS_MAX_NR;
    1882 
    1883 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1884 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
    1885 printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n",
    1886 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
    1887 #endif
    1888 
    1889     }
    1890     else
    1891     {
    1892         req.type   = KMEM_PPM;
    1893         req.order  = bits_log2( CONFIG_VMM_ENVS_SIZE );
    1894         req.flags  = AF_KERNEL | AF_ZERO;
    1895         k_buf      = kmem_alloc( &req );
    1896 
    1897         pointers_bytes = CONFIG_PROCESS_ENVS_MAX_NR * sizeof(char *);
    1898         k_pointers     = (char **)k_buf;
    1899         k_buf_ptr      = k_buf + pointers_bytes;
    1900         k_buf_space    = (CONFIG_VMM_ENVS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes;
    1901         max_index      = CONFIG_PROCESS_ENVS_MAX_NR;
    1902 
    1903 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1904 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
    1905 printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n",
    1906 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
    1907 #endif
    1908 
    1909     }
    1910 
    1911     // copy the user array of pointers to kernel buffer
    1912     hal_copy_from_uspace( XPTR( local_cxy , k_pointers ),
    1913                           u_pointers,
    1914                           pointers_bytes );
    1915 
    1916     // WARNING : the pointers copied in the k_pointers[] array are user pointers,
    1917     // after the loop below, the k_pointers[] array contains kernel pointers.
    1918 
    1919 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1920 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
    1921 printk("\n[%s] thread[%x,%x] copied u_ptr array to k_ptr array\n"
    1922 "    p0 = %x / p1 = %x / p2 = %x / p3 = %x\n",
    1923 __FUNCTION__, this->process->pid, this->trdid,
    1924 k_pointers[0], k_pointers[1], k_pointers[2], k_pointers[3] );
    1925 #endif
    1926 
    1927     // scan kernel array of pointers to copy strings to kernel buffer
    1928     for( index = 0 ; index < max_index ; index++ )
    1929     {
    1930         // exit loop if (k_pointers[] == NUll)
    1931         if( k_pointers[index] == NULL ) break;
    1932 
    1933         // compute string length
    1934         length = hal_strlen_from_uspace( k_pointers[index] ) + 1;
    1935 
    1936         // return error if overflow in kernel buffer
    1937         if( length > k_buf_space ) return -1;
    1938 
    1939         // copy the string to kernel buffer
    1940         hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ),
    1941                               k_pointers[index],
    1942                               length );
    1943 
    1944 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1945 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
    1946 printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n",
    1947 __FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length );
    1948 #endif
    1949 
    1950         // replace the user pointer by a kernel pointer in the k_pointer[] array
    1951         k_pointers[index] = k_buf_ptr;
    1952 
    1953         // increment loop variables
    1954         k_buf_ptr   += length;
    1955         k_buf_space -= length;
    1956 
    1957     }  // end loop on index
    1958 
    1959     // update into exec_info structure
    1960     if( is_args )
    1961     {
    1962         exec_info->args_pointers  =  k_pointers;
    1963         exec_info->args_nr        =  index;
    1964     }
    1965     else
    1966     {
    1967         exec_info->envs_pointers  =  k_pointers;
    1968         exec_info->envs_buf_free  =  k_buf_ptr;
    1969         exec_info->envs_nr        =  index;
    1970     }
    1971 
    1972 #if DEBUG_PROCESS_EXEC_GET_STRINGS
    1973 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle )
    1974 printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n",
    1975 __FUNCTION__, this->process->pid, this->trdid, index );
    1976 #endif
    1977 
    1978     return 0;
    1979 
    1980 } // end process_exec_get_strings()
     1899#if DEBUG_PROCESS_MAKE_EXEC
     1900
     1901/////////////////////////////////////////////////////////////////////////////////////////
     1902// This static debug function displays the current state of the exec_info structure
     1903// embedded in the calling process descriptor.
     1904//
     1905// WARNING : It can be used after execution of the sys_exec function, but it cannot
     1906//           be used after execution of the process_make_exec() function, because the
     1907//           kernel pointers have been replaced by user pointers.
     1908/////////////////////////////////////////////////////////////////////////////////////////
     1909static void process_exec_info_display( bool_t args_ok,
     1910                                       bool_t envs_ok )
     1911{
     1912    uint32_t   i;
     1913    char     * str;    // local pointer on a string
     1914
     1915    process_t * process = CURRENT_THREAD->process;
     1916
     1917    // get relevant info from calling process descriptor
     1918    pid_t       pid      = process->pid;
     1919
     1920    uint32_t    args_nr  = process->exec_info.args_nr;
     1921    char     ** args     = process->exec_info.args_pointers;
     1922
     1923    uint32_t    envs_nr  = process->exec_info.envs_nr;
     1924    char     ** envs     = process->exec_info.envs_pointers;
     1925
     1926    char      * path     = process->exec_info.path;
     1927
     1928    // get pointers on TXT0 chdev
     1929    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1930    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1931    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1932
     1933    // get extended pointer on remote TXT0 lock
     1934    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1935
     1936    // get TXT0 lock
     1937    remote_busylock_acquire( lock_xp );
     1938
     1939    nolock_printk("\n***** exec_info for process %x in cluster %x / %s\n",
     1940    pid , local_cxy , path );
     1941
     1942    // display arguments if required
     1943    if( args_ok )
     1944    {
     1945        for( i = 0 ; i < args_nr ; i++ )
     1946        {
     1947            str = args[i];
     1948            if( str != NULL)         // display pointer and string
     1949            nolock_printk(" - &arg[%d] = %x / arg[%d] = <%s>\n", i, str, i, str );
     1950            else                     // display WARNING
     1951            nolock_printk(" - unexpected NULL pointer for &arg[%d]\n", i );
     1952        }
     1953    }
     1954
     1955    // display env variables if required
     1956    if( envs_ok )
     1957    {
     1958        for( i = 0 ; i < envs_nr ; i++ )
     1959        {
     1960            str = envs[i];
     1961            if( str != NULL)     // display pointer and string
     1962            nolock_printk(" - &env[%d] = %x / env[%d] = <%s>\n", i, str, i, str );
     1963            else                     // display WARNING
     1964            nolock_printk(" - unexpected NULL pointer for &env[%d]\n", i );
     1965        }
     1966    }
     1967
     1968    // release TXT0 lock
     1969    remote_busylock_release( lock_xp );
     1970
     1971}  // end process_exec_info_display()
     1972
     1973#endif // DEBUG_PROCESS_MAKE_EXEC
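Given the WARNING above, the expected (illustrative) use is to dump the structure right after sys_exec() has filled it, while the exec_info pointers are still kernel pointers:

    #if DEBUG_PROCESS_MAKE_EXEC
    if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    process_exec_info_display( true , true );   // display both the args and the envs
    #endif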
    19811974
    19821975/////////////////////////////////
     
    20031996    uint32_t         envs_size;               // envs vseg size (bytes)
    20041997
     1998#if DEBUG_PROCESS_MAKE_EXEC || DEBUG_PROCESS_ERROR
     1999uint32_t cycle = (uint32_t)hal_get_cycles();
     2000#endif
     2001
    20052002    // get calling thread, process, pid, trdid, and ref_xp
    20062003    this    = CURRENT_THREAD;
     
    20142011
    20152012#if DEBUG_PROCESS_MAKE_EXEC
    2016 uint32_t cycle = (uint32_t)hal_get_cycles();
    20172013if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    20182014printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
     
    20322028        if( error )
    20332029        {
    2034                 printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s>\n",
    2035         __FUNCTION__, pid, trdid, elf_path );
     2030
     2031#if DEBUG_PROCESS_ERROR
     2032printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s> / cycle %d\n",
     2033__FUNCTION__, pid, trdid, elf_path, cycle );
     2034#endif
    20362035                return -1;
    20372036        }
     
    20642063#endif
    20652064
    2066     // 4. register the "args" vseg in VSL and map it in GPT, if required
    2067     // this vseg contains both the array of pointers and the strings
     2065    // 4. register the "args" vseg in VSL and map it in GPT, if args_nr != 0.
     2066    //    As this vseg contains an array of pointers, the kernel pointers
      2067    //    are replaced by user pointers in the new process space.
    20682068    args_nr = process->exec_info.args_nr;
    20692069
     
    20712071    {
    20722072        // get args vseg base and size in user space
    2073         args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
    2074         args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     2073        args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_ORDER;
     2074        args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER;
    20752075
    20762076        // create and register args vseg in VMM
     
    20852085        if( vseg == NULL )
    20862086        {
    2087                  printk("\n[ERROR] in %s : thread[%x,%x] cannot get args vseg for <%s>\n",
    2088              __FUNCTION__, pid, trdid, elf_path );
     2087
     2088#if DEBUG_PROCESS_ERROR
     2089printk("\n[ERROR] in %s : thread[%x,%x] cannot create args vseg for <%s> / cycle %d\n",
     2090__FUNCTION__, pid, trdid, elf_path, cycle );
     2091#endif
    20892092                     return -1;
    20902093        }
     
    20982101}
    20992102#endif
    2100         // map all pages for this "args" vseg
     2103        // map all pages for the "args" vseg
    21012104        uint32_t fake_attr;   // required for hal_gpt_lock_pte()
    21022105        ppn_t    fake_ppn;    // required for hal_gpt_lock_pte()
    21032106
    2104         xptr_t   gpt  = XPTR( local_cxy , &process->vmm.gpt );
    2105         uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
    2106         vpn_t    vpn  = CONFIG_VMM_UTILS_BASE;
    2107         ppn_t    ppn  = ((ppn_t)process->exec_info.args_pointers >> CONFIG_PPM_PAGE_SHIFT);
     2107        xptr_t   base_xp = XPTR( local_cxy , process->exec_info.args_pointers );
     2108        xptr_t   gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
     2109        uint32_t attr    = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
     2110        vpn_t    vpn     = CONFIG_VMM_UTILS_BASE;
     2111        ppn_t    ppn     = ppm_base2ppn( base_xp );
    21082112
    21092113        for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 
    21102114        {
    21112115            // lock the PTE
    2112             if (hal_gpt_lock_pte( gpt , vpn , &fake_attr , &fake_ppn ) )
     2116            if (hal_gpt_lock_pte( gpt_xp , vpn + n , &fake_attr , &fake_ppn ) )
    21132117            {
    2114                 printk("\n[ERROR] in %s : thread[%x,%x] cannot map args vpn %x for <%s>\n",
    2115                 __FUNCTION__, pid, trdid, vpn, elf_path );
     2118
     2119#if DEBUG_PROCESS_ERROR
     2120printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn[%x] of args vseg for <%s> / cycle %d\n",
     2121__FUNCTION__, pid, trdid,  vpn + n , elf_path , cycle );
     2122#endif
    21162123                        return -1;
    21172124            }
    21182125
    21192126            // map and unlock the PTE
    2120             hal_gpt_set_pte( gpt , vpn + n , attr , ppn + n );
    2121         }
     2127            hal_gpt_set_pte( gpt_xp , vpn + n , attr , ppn + n );
      2128        }
    21222129
    21232130#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     
    21272134    __FUNCTION__, pid, trdid );
    21282135    hal_vmm_display( ref_xp , true );
     2136    process_exec_info_display( true , false );   // args & not envs
    21292137}
    21302138#endif
    21312139
    2132         // set user space pointers in array of pointers
    2133         char  ** ptr    = process->exec_info.args_pointers;
    2134 
     2140        // build pointer on args buffer in kernel space
     2141        char  ** k_args = process->exec_info.args_pointers;
     2142
     2143        // build pointer on args buffer in user space
     2144        char  ** u_args = (char **)args_base;
     2145
     2146        // set user space pointers in kernel args buffer
    21352147        for( n = 0 ; n < args_nr ; n++ )
    21362148        {
    2137             ptr[n] = ptr[n] + args_base - (intptr_t)ptr;
     2149            k_args[n] = (char *)((intptr_t)k_args[n] + (intptr_t)u_args - (intptr_t)k_args);
    21382150        }
    2139     }
    2140 
    2141     // 5. register the "envs" vseg in VSL and map it in GPT, if required
    2142     // this vseg contains both the array of pointers and the strings
     2151
     2152#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     2153if( DEBUG_PROCESS_MAKE_EXEC < cycle )
     2154printk("\n[%s] thread[%x,%x] args user pointers set in exec_info\n",
     2155__FUNCTION__, pid, trdid );
     2156#endif
     2157
     2158    }
     2159
     2160    // 5. register the "envs" vseg in VSL and map it in GPT, if envs_nr != 0.
     2161    //    As this vseg contains an array of pointers, the kernel pointers
      2162    //    are replaced by user pointers in the new process space.
     2163
    21432164    envs_nr = process->exec_info.envs_nr;
    21442165
     
    21462167    {
    21472168        // get envs vseg base and size in user space from config
    2148         envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
    2149         envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
    2150 
    2151         // TODO (inspired from args)
     2169        envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_ORDER;
     2170        envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER;
     2171
     2172        // TODO (should be similar to the code for args above)
     2173
     2174#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     2175if( DEBUG_PROCESS_MAKE_EXEC < cycle )
     2176printk("\n[%s] thread[%x,%x] envs user pointers set in exec_info\n",
     2177__FUNCTION__, pid, trdid );
     2178#endif
     2179
    21522180    }
    21532181
     
    21562184    // register extended pointer on .elf file in process descriptor
    21572185        error = elf_load_process( file_xp , process );
     2186
    21582187    if( error )
    21592188        {
    2160                 printk("\n[ERROR] in %s : thread[%x,%x] failed to access <%s>\n",
    2161         __FUNCTION__, pid, trdid, elf_path );
     2189
     2190#if DEBUG_PROCESS_ERROR
     2191printk("\n[ERROR] in %s : thread[%x,%x] failed to access file <%s> / cycle %d\n",
     2192__FUNCTION__, pid, trdid , elf_path , cycle );
     2193#endif
    21622194        return -1;
    21632195        }
     
    21832215    if( vseg == NULL )
    21842216    {
    2185             printk("\n[ERROR] in %s : thread[%x,%x] cannot set u_stack vseg for <%s>\n",
    2186         __FUNCTION__, pid, trdid, elf_path );
     2217
     2218#if DEBUG_PROCESS_ERROR
     2219printk("\n[ERROR] in %s : thread[%x,%x] failed to set u_stack vseg for <%s> / cycle %d\n",
     2220__FUNCTION__, pid, trdid , elf_path , cycle );
     2221#endif
    21872222                return -1;
    21882223    }
     
    22052240    if( error )
    22062241    {
    2207         printk("\n[ERROR] in %s : thread[%x,%x] cannot update thread for <%s>\n",
    2208         __FUNCTION__ , pid, trdid, elf_path );
     2242
     2243#if DEBUG_PROCESS_ERROR
     2244printk("\n[ERROR] in %s : thread[%x,%x] failed to set main thread for <%s> / cycle %d\n",
     2245__FUNCTION__, pid, trdid , elf_path , cycle );
     2246#endif
    22092247        return -1;
    22102248    }
    22112249
     2250    // should not be reached, avoid a warning
    22122251        return 0;
    22132252
     
    22942333if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    22952334printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
     2335hal_vmm_display( XPTR( local_cxy , process ) , true );
    22962336#endif
    22972337
     
    23562396
    23572397    // allocates memory for process descriptor from local cluster
    2358         process = process_alloc();
     2398    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
    23592399    if( process == NULL )
    23602400    {
     
    25062546}  // end process_init_create()
    25072547
    2508 /////////////////////////////////////////
    2509 void process_display( xptr_t process_xp )
    2510 {
    2511     process_t   * process_ptr;
    2512     cxy_t         process_cxy;
     2548///////////////////////////////////////////////////
     2549uint32_t process_build_string( xptr_t   process_xp,
     2550                               char   * buffer,
     2551                               uint32_t size )
     2552{
     2553    int32_t       length;          // actual length of the string
     2554
     2555    process_t   * process_ptr;     // process descriptor local pointer
     2556    cxy_t         process_cxy;     // process descriptor cluster identifier
    25132557
    25142558    xptr_t        parent_xp;       // extended pointer on parent process
    2515     process_t   * parent_ptr;
    2516     cxy_t         parent_cxy;
     2559    process_t   * parent_ptr;      // parent process local pointer
     2560    cxy_t         parent_cxy;      // parent process cluster identifier
    25172561
    25182562    xptr_t        owner_xp;        // extended pointer on owner process
    2519     process_t   * owner_ptr;
    2520     cxy_t         owner_cxy;
    2521 
    2522     pid_t         pid;
    2523     pid_t         ppid;
    2524     lpid_t        lpid;
    2525     uint32_t      state;
    2526     uint32_t      th_nr;
     2563    process_t   * owner_ptr;       // owner process local pointer
     2564    cxy_t         owner_cxy;       // owner process cluster identifier
     2565
     2566    pid_t         pid;             // process identifier
     2567    pid_t         ppid;            // parent process identifier
     2568    lpid_t        lpid;            // local process identifier
      2569    uint32_t      state;           // termination state
     2570    uint32_t      th_nr;           // number of threads
    25272571
    25282572    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
     
    25402584    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
    25412585
     2586assert( __FUNCTION__ , (size >= 80 ) , "buffer size too small" );
     2587
    25422588    // get cluster and local pointer on process
    25432589    process_ptr = GET_PTR( process_xp );
     
    25662612    if( lpid )                                   // user process
    25672613    {
    2568 
    25692614        // get extended pointer on file descriptor associated to TXT_RX
    25702615        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
    25712616
    2572         assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
    2573         "process must be attached to one TXT terminal" );
     2617assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
     2618"process must be attached to one TXT terminal" );
    25742619
    25752620        // get TXT_RX chdev pointers
     
    25822627                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
    25832628   
     2629        // get TXT_owner process
    25842630        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy,
    25852631                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
    2586 
    25872632        // get process .elf name
    25882633        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
     
    25942639    else                                         // kernel process_zero
    25952640    {
    2596         // TXT name and .elf name are not registered in kernel process_zero
     2641        // TXT name and .elf name are not registered in kernel process
    25972642        strcpy( txt_name , "txt0_rx" );
    25982643        txt_owner_xp = process_xp;
     
    26032648    if( txt_owner_xp == process_xp )
    26042649    {
    2605         nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n",
     2650        length = snprintk( buffer, size,
     2651        "PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n",
    26062652        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    26072653    }
    26082654    else
    26092655    {
    2610         nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n",
     2656        length = snprintk( buffer, size,
     2657        "PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n",
    26112658        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    26122659    }
     2660
     2661    // check length
     2662    if( (length < 0) )
     2663    {
     2664        length = snprintk( buffer , size ,
     2665        "buffer too small for process %x in cluster %x", pid , process_cxy );
     2666    }
     2667
     2668    return length; 
     2669
     2670}  // end process_build_string()
     2671
     2672/////////////////////////////////////////
     2673void process_display( xptr_t process_xp )
     2674{
     2675    char  buffer[CONFIG_PROCESS_DISPLAY_BUF_SIZE];
     2676
     2677    // build the string to be displayed
     2678    process_build_string( process_xp,
     2679                          buffer,
     2680                          CONFIG_PROCESS_DISPLAY_BUF_SIZE );
     2681    // display the string
     2682    nolock_puts( buffer );
     2683
    26132684}  // end process_display()
    26142685
  • trunk/kernel/kern/process.h

    r669 r683  
    9898 * This structure defines the information required by the process_make_exec() function
    9999 * to create a new reference process descriptor, and the associated main thread.
    100  * All fields in this structure are filled by the sys_exec() function, using the
    101  * process_exec_get_strings() function.
     100 * All fields in this structure are filled by the sys_exec() function.
    102101 *
    103102 * It contains three parts:
     
    106105 * - the "envs_pointers" & "envs_nr" fields define the env variables (one env == one string).
    107106 *
    108  * For both the arguments, and the environment variables, the array of pointers and the
    109  * strings themselve are stored in kernel space in the same kernel buffer containing
    110  * an integer number of pages, defined by CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE.
    111  * This aligned kernel buffer (one or several contiguous physical pages) contains :
     107 * For both the arguments and the environment variables, the array of pointers and the
     108 * strings themselve are stored in the same kernel buffer. These kernel buffers contain
     109 * an integer number of contiguous pages, defined by the CONFIG_VMM_ARGS_SIZE and
     110 * CONFIG_VMM_ENVS_SIZE parameters respectively.
     111 * Each kernel (args / envs) buffer contains :
    112112 * - in the first bytes, a fixed size kernel array of pointers on the strings.
    113113 * - in the following bytes, the strings themselves.
    114  * The size of these arrays of pointers is defined by CONFIG_PROCESS_ARGS_MAX_NR
    115  * and CONFIG¨PROCESS_ENVS_MAX_NR.
    116  *
    117  * WARNING: The "args_pointers" & "envs_pointers" kernel buffer are directly mapped to
    118  *          the "args" and "envs" user vsegs to be accessed by the user process.
    119  *          Therefore, the arrays of pointers build by the sys_exec() function contain
    120  *          kernel pointers, but the process_make_exec() function replace these pointers
    121  *          by user pointers in the new process user space.
     114 * The size of these arrays of pointers is defined by the CONFIG_PROCESS_ARGS_MAX_NR and
     115 * CONFIG_PROCESS_ENVS_MAX_NR parameters respectively.
     116 *
      117 * WARNING (1) The "args_pointers[i]" & "envs_pointers[i]" stored in the dynamically
     118 *             allocated kernel buffers are local pointers. They must be extended by the
     119 *             local cluster identifier to compute a valid PPN.
     120 * WARNING (2) The "args" & "envs" kernel buffers will be mapped to the "args" and "envs"
     121 *             user vsegs, to be accessed by the new user process.
     122 *             The process_make_exec() function must therefore replace the kernel pointers
      123 *             set by sys_exec() with user pointers in the new process user space.
    122124 ********************************************************************************************/
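To make the layout above concrete, the following minimal sketch shows how an args kernel buffer can be filled by sys_exec() and how its kernel pointers are later turned into user pointers by process_make_exec(). The fill loop is an illustrative assumption (it ignores the copy from user space and all bound checks); the final translation loop is the one appearing in the process.c hunk above.

    // args kernel buffer (CONFIG_VMM_ARGS_SIZE contiguous pages):
    // [ args_pointers[0 .. CONFIG_PROCESS_ARGS_MAX_NR-1] | strings ... ]
    char ** k_args = exec_info->args_pointers;                         // array of pointers
    char  * k_str  = (char *)(k_args + CONFIG_PROCESS_ARGS_MAX_NR);    // first string byte

    // sys_exec() copies each string after the array and records a KERNEL pointer
    for( n = 0 ; n < args_nr ; n++ )
    {
        strcpy( k_str , strings[n] );          // copy one argument string (illustrative input)
        k_args[n] = k_str;                     // local kernel pointer on the string
        k_str    += strlen( k_str ) + 1;       // next free byte in the buffer
    }

    // process_make_exec() maps the same physical pages at args_base in user space,
    // then replaces each kernel pointer by the equivalent user pointer
    char ** u_args = (char **)args_base;
    for( n = 0 ; n < args_nr ; n++ )
    {
        k_args[n] = (char *)((intptr_t)k_args[n] + (intptr_t)u_args - (intptr_t)k_args);
    }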
    123125
     
    232234 * The process GPT is initialised as required by the target architecture.
    233235 * The "kcode" and "kdata" segments are registered in the process VSL.
      236 * This function does not return an error code: in case of failure, it prints a PANIC message
     237 * on kernel terminal TXT0, and the core goes to sleep mode.
    234238 *********************************************************************************************
    235239 * @ process  : [in] pointer on process descriptor to initialize.
     
    241245/*********************************************************************************************
    242246 * This function allocates memory and initializes the "process_init" descriptor and the
    243  * associated "thread_init" descriptor. It is called once at the end of the kernel
    244  * initialisation procedure. Its local process identifier is 1, and parent process
    245  * is the kernel process in cluster 0.
     247 * associated "thread_init" descriptor. It is called once at the end of the kernel_init()
     248 * procedure. Its local process identifier is 1, and parent process is the kernel process.
    246249 * The "process_init" is the first user process, and all other user processes will be forked
    247250 * from this process. The code executed by "process_init" is stored in a .elf file, whose
    248251 * pathname is defined by the CONFIG_PROCESS_INIT_PATH configuration variable.
    249  * The process_init does not use the [STDIN/STDOUT/STDERR] streams, but it is linked
    250  * to kernel TXT0, because these streams must be defined for all user processes.
      252 * This function does not return an error code: in case of failure, it prints a PANIC message
     253 * on kernel terminal TXT0, and the core goes to sleep mode.
    251254 ********************************************************************************************/
    252255void process_init_create( void );
     
    415418
    416419/*********************************************************************************************
    417  * This function is called twice by the sys_exec() function :
    418  * - to register the main() arguments (args) in the process <exec_info> structure.
    419  * - to register the environment variables (envs) in the <exec_info> structure.
    420  * In both cases the input is an array of NULL terminated string pointers in user space,
    421  * identified by the <u_pointers> argument. The strings can be dispatched anywhere in
    422  * the calling user process space. The max number of envs, and the max number of args are
    423  * defined by the CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
    424  *********************************************************************************************
    425  * Implementation Note:
    426  * Both the array of pointers and the strings themselve are stored in kernel space in one
    427  * single, dynamically allocated, kernel buffer containing an integer number of pages,
    428  * defined by the CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters.
    429  * This aligned kernel buffer (one or several contiguous physical pages) contains :
    430  * - in the first bytes a fixed size kernel array of kernel pointers on the strings.
    431  * - in the following bytes the strings themselves.
    432  * All the pointers, and the actual number of strings are stored in the process exec_info
    433  * structure defined in the <process.h> file.
    434  *********************************************************************************************
    435  * @ is_args     : [in]    true if called for (args) / false if called for (envs).
    436  * @ u_pointers  : [in]    array of pointers on the strings (in user space).
    437  * @ exec_info   : [inout] pointer on the exec_info structure.
    438  * @ return 0 if success / non-zero if too many strings or no memory.
    439  ********************************************************************************************/
    440 error_t process_exec_get_strings( bool_t         is_args,
    441                                   char        ** u_pointers,
    442                                   exec_info_t  * exec_info );
    443 
    444 /*********************************************************************************************
    445420 * This function implements the "execve" system call, and is called by sys_exec() function.
    446421 * It must be called by the main thread of the calling "old" process.
     
     595570 * @ dst_xp   : extended pointer on the destination process descriptor (in owner cluster).
     596571 * @ src_xp   : extended pointer on the source process descriptor (in owner cluster).
    597  ********************************************************************************************/
    598 void process_fd_replicate( xptr_t dst_xp,
    599                            xptr_t src_xp );
     572 * @ return 0 if success / return -1 if failure
     573 ********************************************************************************************/
     574error_t process_fd_replicate( xptr_t dst_xp,
     575                              xptr_t src_xp );
    600576
    601577/*********************************************************************************************
     
    617593 ********************************************************************************************/
    618594void process_fd_display( xptr_t process_xp );
     595
     596/*********************************************************************************************
     597 * This utility function builds in the buffer defined by the <buffer> and <size> arguments
     598 * a printable string describing the current state of a process descriptor identified
     599 * by the <process_xp> argument, or a WARNING message if the buffer size is too small.
     600 *********************************************************************************************
     601 * @ process_xp  : extended pointer on target process descriptor.
     602 * @ buffer      : kernel buffer for string.
     603 * @ size        : buffer size in bytes.
      605 * @ return the string length (not including the NUL terminator); the string can be a warning message.
     605 ********************************************************************************************/
     606uint32_t process_build_string( xptr_t   process_xp,
     607                               char   * buffer,
     608                               uint32_t size );
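A typical use of process_build_string() mirrors the new process_display() shown in the process.c hunk above: build the one-line description in a local buffer, then print it (a minimal sketch):

    char buffer[CONFIG_PROCESS_DISPLAY_BUF_SIZE];

    // build the printable one-line state of the target process
    process_build_string( process_xp , buffer , CONFIG_PROCESS_DISPLAY_BUF_SIZE );

    // display the string on the kernel terminal
    nolock_puts( buffer );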
    619609
    620610/********************   Thread Related Operations   *****************************************/
  • trunk/kernel/kern/scheduler.c

    r669 r683  
    22 * scheduler.c - Core scheduler implementation.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner       (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    6363// @ returns pointer on selected thread descriptor
    6464////////////////////////////////////////////////////////////////////////////////////////////
    65 static thread_t * sched_select( scheduler_t * sched )
      65static thread_t * __attribute__((__noinline__)) sched_select( scheduler_t * sched )
    6666{
    6767    thread_t     * thread;
     
    8383        while( done == false )
    8484        {
    85 
    86 // check kernel threads list
    87 assert( __FUNCTION__, (count < sched->k_threads_nr), "bad kernel threads list" );
    88 
    8985            // get next entry in kernel list
    9086            current = current->next;
     
    117113        while( done == false )
    118114        {
    119 
    120 // check user threads list
    121 assert( __FUNCTION__, (count < sched->u_threads_nr), "bad user threads list" );
    122 
    123115            // get next entry in user list
    124116            current = current->next;
  • trunk/kernel/kern/scheduler.h

    r662 r683  
    22 * scheduler.h - Core scheduler definition.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018,2019,2020)
     4 * Author    Alain Greiner       (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/kern/thread.c

    r669 r683  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017,2018,2019,2020)
     5 *         Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    6767  }
    6868}
    69 
    70 /////////////////////////////////////////////////////////////////////////////////////
    71 // This static function allocates physical memory for a thread descriptor.
    72 // It can be called by the three functions:
    73 // - thread_user_create()
    74 // - thread_user_fork()
    75 // - thread_kernel_create()
    76 /////////////////////////////////////////////////////////////////////////////////////
    77 // @ return pointer on thread descriptor if success / return NULL if failure.
    78 /////////////////////////////////////////////////////////////////////////////////////
    79 static thread_t * thread_alloc( void )
    80 {
    81         kmem_req_t     req;    // kmem request
    82 
    83         // allocates memory for thread descriptor + kernel stack
    84         req.type  = KMEM_PPM;
    85         req.order = CONFIG_THREAD_DESC_ORDER;
    86         req.flags = AF_KERNEL | AF_ZERO;
    87 
    88     return kmem_alloc( &req );
    89 
    90 }  // end thread_alloc()
    91  
    9269
    9370/////////////////////////////////////////////////////////////////////////////////////
     
    144121
    145122#if DEBUG_BUSYLOCK
    146     xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
     123xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
    147124#endif
    148125
     
    161138    list_entry_init( &thread->sched_list );
    162139
    163     // initialize the embedded alarm to unlink
     140    // initialize the embedded alarm
    164141    list_entry_init( &thread->alarm.list );
    165142
     
    187164    dqdt_increment_threads();
    188165
      166    // initialize the timer alarm
     167    alarm_init( &thread->alarm );
     168
    189169#if CONFIG_INSTRUMENTATION_PGFAULTS
    190     thread->info.false_pgfault_nr    = 0;
    191     thread->info.false_pgfault_cost  = 0;
    192     thread->info.false_pgfault_max   = 0;
    193     thread->info.local_pgfault_nr    = 0;
    194     thread->info.local_pgfault_cost  = 0;
    195     thread->info.local_pgfault_max   = 0;
    196     thread->info.global_pgfault_nr   = 0;
    197     thread->info.global_pgfault_cost = 0;
    198     thread->info.global_pgfault_max  = 0;
     170thread->info.false_pgfault_nr    = 0;
     171thread->info.false_pgfault_cost  = 0;
     172thread->info.false_pgfault_max   = 0;
     173thread->info.local_pgfault_nr    = 0;
     174thread->info.local_pgfault_cost  = 0;
     175thread->info.local_pgfault_max   = 0;
     176thread->info.global_pgfault_nr   = 0;
     177thread->info.global_pgfault_cost = 0;
     178thread->info.global_pgfault_max  = 0;
    199179#endif
    200180
     
    273253
    274254    // allocate memory for thread descriptor
    275     thread = thread_alloc();
     255    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
    276256
    277257    if( thread == NULL )
     
    467447
    468448    // allocate memory for child thread descriptor
    469     child_ptr = thread_alloc();
     449    child_ptr = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
    470450
    471451    if( child_ptr == NULL )
     
    677657uint32_t cycle = (uint32_t)hal_get_cycles();
    678658if( DEBUG_THREAD_USER_EXEC < cycle )
    679 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    680 __FUNCTION__, process->pid, thread->trdid, cycle );
     659printk("\n[%s] thread[%x,%x] enter / argc %d / argv %x / cycle %d\n",
     660__FUNCTION__, process->pid, thread->trdid, argc, argv, cycle );
    681661#endif
    682662
     
    727707#endif
    728708
    729     // restore CPU registers ... and jump to user code
     709    // restore CPU registers => jump to user code
    730710    hal_do_cpu_restore( thread->cpu_context );
    731711
     
    759739
    760740    // allocate memory for new thread descriptor
    761     thread = thread_alloc();
     741    thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
    762742
    763743    if( thread == NULL )
     
    839819
    840820// check arguments
    841 assert( __FUNCTION__, (type == THREAD_IDLE) , "illegal thread type" );
    842 assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
     821assert( __FUNCTION__, (type == THREAD_IDLE),
     822"illegal thread type" );
     823
     824assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr),
     825"illegal core index" );
    843826
    844827    // set type in thread descriptor
     
    848831    error = process_register_thread( &process_zero , thread , &trdid );
    849832
    850 assert( __FUNCTION__, (error == 0), "cannot register idle_thread in kernel process" );
     833assert( __FUNCTION__, (error == 0),
     834"cannot register idle_thread in kernel process" );
    851835
    852836    // set trdid in thread descriptor
     
    863847                         NULL );   // no user stack for a kernel thread
    864848
    865 assert( __FUNCTION__, (error == 0), "cannot initialize idle_thread" );
     849assert( __FUNCTION__, (error == 0),
     850"cannot initialize idle_thread" );
    866851
    867852    // allocate CPU context
    868853    error = hal_cpu_context_alloc( thread );
    869854
    870 assert( __FUNCTION__, (error == 0), "cannot allocate CPU context" );
      855assert( __FUNCTION__, (error == 0),
     856"cannot allocate CPU context" );
    871857
    872858    // initialize CPU context
     
    963949
    964950    // release memory for thread descriptor (including kernel stack)
    965     kmem_req_t   req;
    966     req.type  = KMEM_PPM;
    967     req.ptr   = thread;
    968     kmem_free( &req );
     951    kmem_free( thread , CONFIG_THREAD_DESC_ORDER );
    969952
    970953#if DEBUG_THREAD_DESTROY
     
    10911074}  // end thread_unblock()
    10921075
    1093 //////////////////////////////////////
     1076//////////////////////////////////////////////
    10941077void thread_delete_request( xptr_t  target_xp,
    1095                     bool_t  is_forced )
     1078                            bool_t  is_forced )
    10961079{
    10971080    reg_t       save_sr;                // for critical section
     
    14751458        thread->busylocks - 1, (uint32_t)hal_get_cycles() );
    14761459
    1477 #if DEBUG_BUSYLOCK
     1460#if DEBUG_BUSYLOCK_TYPE
    14781461
    14791462// scan list of busylocks
  • trunk/kernel/kern/thread.h

    r669 r683  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017,2018,2019,2020)
     5 *         Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    9696#define THREAD_BLOCKED_LOCK      0x1000  /*! ANY : wait queuelock or rwlock           */
    9797#define THREAD_BLOCKED_CLIENT    0x2000  /*! DEV : wait clients queue non empty       */
    98 #define THREAD_BLOCKED_ALARM     0x4000  /*! ANY : wait a timer based alarm           */
     98#define THREAD_BLOCKED_SLEEP     0x4000  /*! ANY : wait a timer based alarm           */
    9999
    100100/***************************************************************************************
  • trunk/kernel/kernel_config.h

    r675 r683  
    2525#define _KERNEL_CONFIG_H_
    2626
    27 ////////////////////////////////////////////////////////////////////////////////////////////
     27////////////////////////////////////////////////////////////////////////////////////////
    2828//                              KERNEL DEBUG
    29 //  Each debug variable control one kernel function, or one small group of functions.
    30 //  - trace is generated only when cycle > debug_value.
    31 //  - detailed trace is enabled when (debug_value & Ox1) is non zero.
    32 ////////////////////////////////////////////////////////////////////////////////////////////
     29//
      30// 1) All errors detected by the kernel during a system call are reported to the
      31//    user process using the ERRNO mechanism. Moreover, the DEBUG_***_ERROR variables
      32//    enable the display on TXT0 of the error messages generated by the low-level
      33//    kernel functions, to help analyse the error cause.
      34//
      35// 2) The other debug variables enable the display on TXT0 of a trace for one
      36//    specific kernel function, or one small group of functions.
      37//    All these trace variables (except locks, kmem, kcm) respect the following rules:
      38//    - the trace is generated only if the debug variable is non zero.
      39//    - the trace is generated only when cycle > debug_value.
      40//    - a detailed trace is enabled when (debug_value & 0x1) is non zero.
     41////////////////////////////////////////////////////////////////////////////////////////
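As an illustration, these rules translate into the conditional trace pattern used throughout this changeset; a sketch, taking DEBUG_PROCESS_MAKE_EXEC as an example:

    #if DEBUG_PROCESS_MAKE_EXEC
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_PROCESS_MAKE_EXEC < cycle )              // trace only after <debug_value> cycles
    printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
    __FUNCTION__, pid, trdid, elf_path, cycle );
    #endif

    #if( DEBUG_PROCESS_MAKE_EXEC & 1 )                 // detailed trace when bit 0 is set
    if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    hal_vmm_display( ref_xp , true );
    #endif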
     42
     43// error reporting variables
     44
     45#define DEBUG_DEV_NIC_ERROR               1
     46#define DEBUG_KCM_ERROR                   1
     47#define DEBUG_KMEM_ERROR                  1
     48#define DEBUG_MAPPER_ERROR                1
     49#define DEBUG_PPM_ERROR                   1
     50#define DEBUG_PROCESS_ERROR               1
     51#define DEBUG_SOCKET_ERROR                1
     52#define DEBUG_SYSCALLS_ERROR              1
     53#define DEBUG_THREAD_ERROR                1
     54#define DEBUG_USER_DIR_ERROR              1
     55#define DEBUG_VFS_ERROR                   1
     56#define DEBUG_VMM_ERROR                   1
     57
     58
     59// trace activation variables
    3360
    3461#define DEBUG_BARRIER_CREATE              0
     
    3663#define DEBUG_BARRIER_WAIT                0
    3764
    38 #define DEBUG_BUSYLOCK_TYPE               0
    39 #define DEBUG_BUSYLOCK_PID                0        
    40 #define DEBUG_BUSYLOCK_TRDID              0      
     65#define DEBUG_BUSYLOCK_TYPE               0   // type 0 undefined => no debug
     66#define DEBUG_BUSYLOCK_PID                0   // owner process PID
     67#define DEBUG_BUSYLOCK_TRDID              0   // owner thread TRDID
    4168                 
    4269#define DEBUG_CHDEV_CMD_RX                0
     
    115142
    116143#define DEBUG_KCM                         0
    117 #define DEBUG_KCM_REMOTE                  0
     144#define DEBUG_KCM_ORDER                   0    // filter for DEBUG_KCM
     145#define DEBUG_KCM_CXY                     0    // filter for DEBUG_KCM
     146
     147#define DEBUG_KERNEL_INIT                 0
    118148
    119149#define DEBUG_KMEM                        0
    120 #define DEBUG_KMEM_REMOTE                 0
    121 
    122 #define DEBUG_KERNEL_INIT                 0
     150#define DEBUG_KMEM_ORDER                  0    // filter for DEBUG_KMEM
     151#define DEBUG_KMEM_CXY                    0    // filter for DEBUG_KMEM
    123152
    124153#define DEBUG_MAPPER_GET_PAGE             0
     
    133162#define DEBUG_PPM_ALLOC_PAGES             0
    134163#define DEBUG_PPM_FREE_PAGES              0
    135 #define DEBUG_PPM_REMOTE_ALLOC_PAGES      0
    136 #define DEBUG_PPM_REMOTE_FREE_PAGES       0
    137164   
    138165#define DEBUG_PROCESS_COPY_INIT           0
    139166#define DEBUG_PROCESS_DESTROY             0
    140 #define DEBUG_PROCESS_EXEC_GET_STRINGS    0
    141167#define DEBUG_PROCESS_FD_REGISTER         0
    142168#define DEBUG_PROCESS_FD_REMOVE           0
     
    151177#define DEBUG_PROCESS_ZERO_CREATE         0
    152178
    153 #define DEBUG_QUEUELOCK_TYPE              0           // type 0 undefined => no debug
    154 #define DEBUG_QUEUELOCK_PTR               0
    155 #define DEBUG_QUEUELOCK_CXY               0
     179#define DEBUG_QUEUELOCK_TYPE              0   // type 0 undefined => no debug
     180#define DEBUG_QUEUELOCK_PTR               0   // lock local pointer
     181#define DEBUG_QUEUELOCK_CXY               0   // lock cluster identifier
    156182
    157183#define DEBUG_RPC_CLIENT_GENERIC          0
     
    170196#define DEBUG_RPC_VMM_SET_COW             0
    171197
    172 #define DEBUG_RWLOCK_TYPE                 0           // type 0 undefined => no debug
    173 #define DEBUG_RWLOCK_PTR                  0
    174 #define DEBUG_RWLOCK_CXY                  0
     198#define DEBUG_RWLOCK_TYPE                 0   // type 0 undefined => no debug
     199#define DEBUG_RWLOCK_PTR                  0   // lock local pointer
     200#define DEBUG_RWLOCK_CXY                  0   // lock cluster identifier
    175201
    176202#define DEBUG_SCHED_HANDLE_SIGNALS        0
     
    189215#define DEBUG_SOCKET_DESTROY              0
    190216#define DEBUG_SOCKET_LISTEN               0
     217#define DEBUG_SOCKET_SEND                 0
    191218#define DEBUG_SOCKET_RECV                 0
    192 #define DEBUG_SOCKET_SEND                 0
    193219#define DEBUG_SOCKET_LINK                 0
    194 
    195 #define DEBUG_SYSCALLS_ERROR              0
    196220
    197221#define DEBUG_SYS_BARRIER                 0
     
    205229#define DEBUG_SYS_FG                      0
    206230#define DEBUG_SYS_FORK                    0
    207 #define DEBUG_SYS_GET_CONFIG              0
     231#define DEBUG_SYS_GET                     0 
    208232#define DEBUG_SYS_GETCWD                  0
    209233#define DEBUG_SYS_GETPID                  0
    210 #define DEBUG_SYS_GET_BEST_CORE           0
    211 #define DEBUG_SYS_GET_CORE_ID             0
    212 #define DEBUG_SYS_GET_NB_CORES            0
    213 #define DEBUG_SYS_GET_THREAD_INFO         0
    214234#define DEBUG_SYS_ISATTY                  0
    215235#define DEBUG_SYS_IS_FG                   0
     
    250270#define DEBUG_THREAD_USER_EXEC            0
    251271
    252 #define DEBUG_USER_DIR                    0
    253 
    254 #define DEBUG_VFS_ERROR                   0
     272#define DEBUG_USER_DIR_CREATE             0
     273#define DEBUG_USER_DIR_DESTROY            0
    255274
    256275#define DEBUG_VFS_ADD_CHILD               0
     
    277296#define DEBUG_VFS_UNLINK                  0
    278297
     298
    279299#define DEBUG_VMM_CREATE_VSEG             0
    280300#define DEBUG_VMM_DESTROY                 0
     
    352372////////////////////////////////////////////////////////////////////////////////////////////
    353373
    354 #define CONFIG_VERSION           "Version 2.3 / November 2019"
     374#define CONFIG_VERSION           "Version 2.4 / November 2020"
    355375
    356376////////////////////////////////////////////////////////////////////////////////////////////
     
    370390#define CONFIG_CLUSTER_SPAN                 32         // ln(phys. address space per cluster)
    371391#define CONFIG_CACHE_LINE_SIZE              64         // number of bytes in cache line
     392#define CONFIG_CACHE_LINE_ORDER             6          // ln( cache line size )
    372393
    373394#define CONFIG_CACHE_LINE_ALIGNED           __attribute__((aligned(CONFIG_CACHE_LINE_SIZE)))
     
    389410
    390411////////////////////////////////////////////////////////////////////////////////////////////
     412//                                  DQDT       
     413////////////////////////////////////////////////////////////////////////////////////////////
     414
     415#define CONFIG_DQDT_LEVELS_NR               5
     416
     417////////////////////////////////////////////////////////////////////////////////////////////
     418//                              FBF WINDOWS       
     419////////////////////////////////////////////////////////////////////////////////////////////
     420
     421#define CONFIG_FBF_WINDOWS_MAX_NR           64         // max number of windows
     422#define CONFIG_FBF_WINDOWS_MAX_WIDTH        1024       // max number of pixels in FBF line
     423#define CONFIG_FBF_WINDOWS_MAX_HEIGHT       1024       // max number of lines in FBF
     424
     425////////////////////////////////////////////////////////////////////////////////////////////
    391426//                            PROCESS MANAGEMENT       
    392427////////////////////////////////////////////////////////////////////////////////////////////
    393428
     429#define CONFIG_PROCESS_INIT_PATH            "/bin/user/init.elf"
    394430#define CONFIG_MAX_PROCESS_PER_CLUSTER      16         // max number of owned process
    395431#define CONFIG_PROCESS_ARGS_MAX_NR          4          // max number of args per process
     
    399435#define CONFIG_PROCESS_HEAP_MIN_SIZE        0x00010000 // user heap min size (bytes)
    400436#define CONFIG_PROCESS_HEAP_MAX_SIZE        0x30000000 // user heap max size (bytes)
    401 #define CONFIG_PROCESS_INIT_PATH            "/bin/user/init.elf"
     437#define CONFIG_PROCESS_DISPLAY_BUF_SIZE     128        // display one process on one line
     438
     439////////////////////////////////////////////////////////////////////////////////////////////
     440//                          PHYSICAL MEMORY MANAGEMENT         
     441////////////////////////////////////////////////////////////////////////////////////////////
     442
     443#define CONFIG_PPM_PAGE_SIZE          4096          // physical page size (bytes)
     444#define CONFIG_PPM_PAGE_ORDER         12            // ln(physical page size)
     445#define CONFIG_PPM_PAGE_MASK          0x00000FFF    // physical page mask     
     446#define CONFIG_PPM_MAX_ORDER          16            // ln(total number of pages per cluster)
     447#define CONFIG_PPM_MAX_RSVD           32            // max reserved zones on the machine
     448
     449#define CONFIG_PPM_PAGE_ALIGNED       __attribute__((aligned(CONFIG_PPM_PAGE_SIZE)))
     450
     451////////////////////////////////////////////////////////////////////////////////////////////
     452//                              RANDOM NUMBERS
     453////////////////////////////////////////////////////////////////////////////////////////////
     454
     455#define CONFIG_RDNG_PARAM_A                 65519
     456#define CONFIG_RDNG_PARAM_C                 64037
     457
     458////////////////////////////////////////////////////////////////////////////////////////////
     459//                             REMOTE PROCEDURE CALL
     460////////////////////////////////////////////////////////////////////////////////////////////
     461
      462#define CONFIG_RPC_FIFO_SLOTS               16
     463#define CONFIG_RPC_FIFO_MAX_ITERATIONS      1024
     464#define CONFIG_RPC_THREADS_MAX              4       // max number of RPC threads per core
     465
     466////////////////////////////////////////////////////////////////////////////////////////////
     467//                                SCHEDULING
     468////////////////////////////////////////////////////////////////////////////////////////////
     469
      470#define CONFIG_SCHED_TICKS_PER_SECOND       1        // number of TICKS per second
     471#define CONFIG_SCHED_TICKS_PER_QUANTUM      1        // number of ticks between scheduling
     472#define CONFIG_SCHED_MAX_THREADS_NR         32       // max number of threads per core
     473#define CONFIG_SCHED_IDLE_MODE_SLEEP        0        // idle thread use sleep mode if non 0
     474
     475////////////////////////////////////////////////////////////////////////////////////////////
     476//                                TCP/UDP/IP
     477////////////////////////////////////////////////////////////////////////////////////////////
     478
     479#define CONFIG_SOCK_ISS_CLIENT        0x10000      // initial sequence number for TCP client
     480#define CONFIG_SOCK_ISS_SERVER        0x20000      // initial sequence number for TCP server
     481#define CONFIG_SOCK_MAX_WINDOW        0xFFFFF      // initial window (bytes) for TCP
     482#define CONFIG_SOCK_RETRY_TIMEOUT     1000000      // number of cycles before retry for TCP
     483#define CONFIG_SOCK_QUEUES_DEPTH      4            // max number of packets in RX/TX queues
     484#define CONFIG_SOCK_RX_BUF_ORDER      20           // ln( number of bytes in socket rx_buf )
     485#define CONFIG_SOCK_TX_BUF_ORDER      20           // ln( number of bytes in socket tx_buf )
     486#define CONFIG_SOCK_R2T_BUF_SIZE      8            // max number of requests in R2T queue
     487#define CONFIG_SOCK_CRQ_BUF_SIZE      8            // max number of requests in CRQ queue
     488#define CONFIG_SOCK_PKT_BUF_SIZE      2048         // max length for one ETH/IP/TCP packet
     489#define CONFIG_SOCK_PAYLOAD_MAX       1500         // max user payload length for packet
     490
     491////////////////////////////////////////////////////////////////////////////////////////////
     492//                                 THREADS
     493////////////////////////////////////////////////////////////////////////////////////////////
     494
     495#define CONFIG_THREADS_MAX_PER_CLUSTER      32       // max threads per cluster per process
     496#define CONFIG_THREAD_DESC_SIZE             0x4000   // thread desc size (with kernel stack)
     497#define CONFIG_THREAD_DESC_ORDER            14       // ln( number of bytes )
    402498
    403499////////////////////////////////////////////////////////////////////////////////////////////
     
    419515#define CONFIG_MAPPER_GRDXT_W2              7          // number of bits for RADIX_TREE_IX2
    420516#define CONFIG_MAPPER_GRDXT_W3              7          // number of bits for RADIX_TREE_IX3
    421 
    422 ////////////////////////////////////////////////////////////////////////////////////////////
    423 //                              FBF WINDOWS       
    424 ////////////////////////////////////////////////////////////////////////////////////////////
    425 
    426 #define CONFIG_FBF_WINDOWS_MAX_NR           64         // max number of windows
    427 #define CONFIG_FBF_WINDOWS_MAX_WIDTH        1024       // max number of pixels in FBF line
    428 #define CONFIG_FBF_WINDOWS_MAX_HEIGHT       1024       // max number of lines in FBF
    429 
    430 ////////////////////////////////////////////////////////////////////////////////////////////
    431 //                                  DQDT       
    432 ////////////////////////////////////////////////////////////////////////////////////////////
    433 
    434 #define CONFIG_DQDT_LEVELS_NR               5
    435 
    436 ////////////////////////////////////////////////////////////////////////////////////////////
    437 //                              RANDOM NUMBERS
    438 ////////////////////////////////////////////////////////////////////////////////////////////
    439 
    440 #define CONFIG_RDNG_PARAM_A                 65519
    441 #define CONFIG_RDNG_PARAM_C                 64037
    442 
    443 ////////////////////////////////////////////////////////////////////////////////////////////
    444 //                                SCHEDULING
    445 ////////////////////////////////////////////////////////////////////////////////////////////
    446 
    447 #define CONFIG_SCHED_TICK_MS_PERIOD         10000    // number of milliseconds per period
    448 #define CONFIG_SCHED_TICKS_PER_QUANTUM      1        // number of ticks between scheduling
    449 #define CONFIG_SCHED_MAX_THREADS_NR         32       // max number of threads per core
    450 #define CONFIG_SCHED_IDLE_MODE_SLEEP        0        // idle thread use sleep mode if non 0
    451 
    452 ////////////////////////////////////////////////////////////////////////////////////////////
    453 //                                 THREADS
    454 ////////////////////////////////////////////////////////////////////////////////////////////
    455 
    456 #define CONFIG_THREADS_MAX_PER_CLUSTER      32       // max threads per cluster per process
    457 #define CONFIG_THREAD_DESC_SIZE             0x4000   // thread desc size (with kernel stack)
    458 #define CONFIG_THREAD_DESC_ORDER            2        // ln( number of 4K pages )
    459 
    460 ////////////////////////////////////////////////////////////////////////////////////////////
    461 //                             REMOTE PROCEDURE CALL
    462 ////////////////////////////////////////////////////////////////////////////////////////////
    463 
    464 #define CONFIG_REMOTE_FIFO_SLOTS                    16
    465 #define CONFIG_REMOTE_FIFO_MAX_ITERATIONS   1024
    466 #define CONFIG_RPC_THREADS_MAX              4       // max number of RPC threads per core
    467517
    468518////////////////////////////////////////////////////////////////////////////////////////////
     
    479529#define CONFIG_VMM_ARGS_SIZE          0x000001      // args vseg size         : 4   Kbytes
    480530#define CONFIG_VMM_ENVS_SIZE          0x000004      // envs vseg size         : 16  Kbytes
    481 #define CONFIG_VMM_STACK_SIZE         0x001000      // single stack vseg size : 16  Mbytes
    482 
    483 #define CONFIG_VMM_HEAP_MAX_ORDER     18           // max size of MMAP vseg  :  1   Gbytes
    484 
    485 ////////////////////////////////////////////////////////////////////////////////////////////
    486 //                      PHYSICAL MEMORY MANAGEMENT         
    487 ////////////////////////////////////////////////////////////////////////////////////////////
    488 
    489 #define CONFIG_PPM_PAGE_SIZE          4096          // physical page size (bytes)
    490 #define CONFIG_PPM_PAGE_SHIFT         12            // physical page shift (bits)
    491 #define CONFIG_PPM_PAGE_MASK          0x00000FFF    // physical page mask     
    492 #define CONFIG_PPM_MAX_ORDER          16            // ln(total number of pages per cluster)
    493 #define CONFIG_PPM_HEAP_ORDER         10            // ln(number of heap pages per cluster)
    494 #define CONFIG_PPM_MAX_RSVD           32            // max reserved zones on the machine
    495 
    496 #define CONFIG_PPM_PAGE_ALIGNED       __attribute__((aligned(CONFIG_PPM_PAGE_SIZE)))
     531#define CONFIG_VMM_STACK_SIZE         0x000100      // single stack vseg size : 1   Mbytes
     532
     533#define CONFIG_VMM_HEAP_MAX_ORDER     18            // max size of MMAP vseg  : 1   Gbytes
    497534
    498535////////////////////////////////////////////////////////////////////////////////////////////
     
    502539#define CONFIG_PRINTK_BUF_SIZE        0x800        // max length of a formated string
    503540#define CONFIG_PIPE_BUF_SIZE          0x1000       // max number of bytes in a pipe buffer
    504 #define CONFIG_SOCK_RX_BUF_SIZE       0x100000     // max number of bytes in  RX buffer
    505 #define CONFIG_SOCK_R2T_BUF_SIZE      0x64         // max number of requests in R2T queue
    506 #define CONFIG_SOCK_CRQ_BUF_SIZE      0x8          // max number of requests in CRQ queue
    507 #define CONFIG_SOCK_PKT_BUF_SIZE      0x800        // max length for one ETH/IP/TCP packet
    508541
    509542////////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/libk/elf.c

    r671 r683  
    161161                {
    162162                        type                       = VSEG_TYPE_CODE;
    163                         process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;
     163                        process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER;
    164164                }
    165165                else               // found DATA segment
    166166                {
    167167                        type                       = VSEG_TYPE_DATA;
    168                         process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;
     168                        process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER;
    169169                }
    170170
     
    215215{
    216216    uint32_t     new_offset;       // unused, required by vfs_lseek()
    217         kmem_req_t   req;              // kmem request for program header
    218217        Elf_Ehdr     header;           // local buffer for .elf header
    219218        void       * segs_base;        // pointer on buffer for segment descriptors array
     
    278277
    279278        // allocate memory for segment descriptors array
    280         req.type  = KMEM_KCM;
    281         req.order = bits_log2(segs_size);
    282         req.flags = AF_KERNEL;
    283         segs_base = kmem_alloc( &req );
     279        segs_base = kmem_alloc( bits_log2(segs_size) , AF_NONE );
    284280
    285281        if( segs_base == NULL )
     
    295291        {
    296292                printk("\n[ERROR] in %s : cannot seek for descriptors array\n", __FUNCTION__ );
    297                 req.ptr = segs_base;
    298                 kmem_free( &req );
     293                kmem_free( segs_base , bits_log2(segs_size) );
    299294                return -1;
    300295        }
     
    314309        {
    315310                printk("\n[ERROR] in %s : cannot read segments descriptors\n", __FUNCTION__ );
    316                 req.ptr = segs_base;
    317                 kmem_free( &req );
     311                kmem_free( segs_base , bits_log2(segs_size) );
    318312                return -1;
    319313        }
     
    331325        if( error )
    332326        {
    333                 req.ptr = segs_base;
    334                 kmem_free( &req );
     327                printk("\n[ERROR] in %s : cannot register segments descriptors\n", __FUNCTION__ );
     328                kmem_free( segs_base , bits_log2(segs_size) );
    335329                return -1;
    336330        }
     
    343337
    344338        // release allocated memory for program header
    345         req.ptr = segs_base;
    346         kmem_free(&req);
     339    kmem_free( segs_base , bits_log2(segs_size) );
    347340
    348341#if DEBUG_ELF_LOAD
  • trunk/kernel/libk/grdxt.c

    r671 r683  
    4040                    uint32_t  ix3_width )
    4141{
     42
     43assert( __FUNCTION__, (rt != NULL),
     44"pointer on radix tree is NULL\n" );
     45
    4246    void      ** root;
    43         kmem_req_t   req;
    4447 
    4548        rt->ix1_width = ix1_width;
     
    4851
    4952    // allocates first level array
    50         req.type  = KMEM_KCM;
    51         req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
    52         req.flags = AF_KERNEL | AF_ZERO;
    53         root = kmem_alloc( &req );
     53        uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
     54        root = kmem_alloc( order , AF_ZERO );
    5455
    5556        if( root == NULL )
     
    6869void grdxt_destroy( grdxt_t * rt )
    6970{
    70         kmem_req_t req;
     71
     72assert( __FUNCTION__, (rt != NULL),
     73"pointer on radix tree is NULL\n" );
     74
     75    uint32_t   order;
    7176
    7277    uint32_t   w1 = rt->ix1_width;
     
    8186        uint32_t   ix2;
    8287        uint32_t   ix3;
    83 
    84 assert( __FUNCTION__, (rt != NULL) , "pointer on radix tree is NULL\n" );
    8588
    8689        for( ix1=0 ; ix1 < (uint32_t)(1 << w1) ; ix1++ )
     
    106109
    107110            // release level 3 array
    108             req.type = KMEM_KCM;
    109                     req.ptr  = ptr3;
    110                     kmem_free( &req );
     111                order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
     112                    kmem_free( ptr3 , order );
    111113        }
    112114
    113115        // release level 2 array
    114         req.type = KMEM_KCM;
    115                 req.ptr  = ptr2;
    116                 kmem_free( &req );
     116            order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
     117                kmem_free( ptr2 , order );
    117118    }
    118119
    119120    // release level 1 array
    120     req.type = KMEM_KCM;
    121         req.ptr  = ptr1;
    122         kmem_free( &req );
     121        order = w1 + ( (sizeof(void*) == 4) ? 2 : 3 );
     122        kmem_free( ptr1 , order );
    123123
    124124}  // end grdxt_destroy()
     
    129129                      void     * value )
    130130{
    131         kmem_req_t      req;
     131    uint32_t        order;
    132132
    133133    uint32_t        w1 = rt->ix1_width;
     
    136136
    137137// Check key value
    138 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
     138assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
     139"illegal key value %x\n", key );
    139140
    140141    // compute indexes
     
    155156        {
    156157        // allocate memory for level 2 array
    157         req.type  = KMEM_KCM;
    158         req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
    159         req.flags = AF_KERNEL | AF_ZERO;
    160         ptr2 = kmem_alloc( &req );
     158        order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
     159        ptr2 = kmem_alloc( order , AF_ZERO );
    161160
    162161        if( ptr2 == NULL) return -1;
     
    173172        {
    174173        // allocate memory for level 3 array
    175         req.type = KMEM_KCM;
    176         req.order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
    177         req.flags = AF_KERNEL | AF_ZERO;
    178         ptr3 = kmem_alloc( &req );
     174        order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
     175        ptr3 = kmem_alloc( order , AF_ZERO );
    179176
    180177        if( ptr3 == NULL) return -1;
     
    202199
    203200// Check key value
    204 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
     201assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
     202"illegal key value %x\n", key );
    205203
    206204    // compute indexes
     
    244242
    245243// Check key value
    246 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
     244assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
     245"illegal key value %x\n", key );
    247246
    248247    void         ** ptr1 = rt->root;
     
    284283
    285284// Check key value
    286 assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", start_key );
     285assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ),
     286"illegal key value %x\n", start_key );
    287287
    288288    // compute max indexes
     
    338338                           uint32_t   ix3_width )
    339339{
     340
     341assert( __FUNCTION__, (rt_xp != XPTR_NULL),
     342"extended pointer on radix tree is NULL\n" );
     343
    340344    void      ** root;
    341         kmem_req_t   req;
    342345
    343346    // get cluster and local pointer
     
    351354
    352355    // allocates first level array
    353         req.type  = KMEM_KCM;
    354         req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
    355         req.flags = AF_KERNEL | AF_ZERO;
    356         root      = kmem_remote_alloc( rt_cxy , &req );
     356        uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
     357        root = kmem_remote_alloc( rt_cxy , order , AF_ZERO );
    357358
    358359        if( root == NULL )
     
    372373void grdxt_remote_destroy( xptr_t  rt_xp )
    373374{
    374         kmem_req_t req;
     375
     376assert( __FUNCTION__, (rt_xp != XPTR_NULL),
     377"extended pointer on radix tree is NULL\n" );
     378
     379    uint32_t   order;
    375380
    376381    uint32_t   w1;
     
    422427
    423428            // release level 3 array
    424             req.type = KMEM_KCM;
    425                     req.ptr  = ptr3;
    426                     kmem_remote_free( rt_cxy , &req );
     429                    order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
     430                    kmem_remote_free( rt_cxy , ptr3 , order );
    427431        }
    428432
    429433        // release level 2 array
    430         req.type = KMEM_KCM;
    431                 req.ptr  = ptr2;
    432             kmem_remote_free( rt_cxy , &req );
     434        order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
     435        kmem_remote_free( rt_cxy , ptr2 , order );
    433436    }
    434437
    435438    // release level 1 array
    436     req.type = KMEM_KCM;
    437         req.ptr  = ptr1;
    438     kmem_remote_free( rt_cxy , &req );
     439    order = w1 + ((sizeof(void*) == 4) ? 2 : 3 );
     440    kmem_remote_free( rt_cxy , ptr1 , order );
    439441
    440442}  // end grdxt_remote_destroy()
     
    445447                             void     * value )
    446448{
    447     kmem_req_t  req;
     449    uint32_t order;
    448450
    449451    // get cluster and local pointer on remote rt descriptor
     
    507509    {
    508510        // allocate memory in remote cluster
    509         req.type  = KMEM_KCM;
    510         req.order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
    511         req.flags = AF_ZERO | AF_KERNEL;
    512         ptr2 = kmem_remote_alloc( rt_cxy , &req );
     511        order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
     512        ptr2 = kmem_remote_alloc( rt_cxy , order , AF_ZERO );
    513513
    514514        if( ptr2 == NULL ) return -1;
     
    538538    {
    539539        // allocate memory in remote cluster
    540         req.type  = KMEM_KCM;
    541         req.order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
    542         req.flags = AF_ZERO | AF_KERNEL;
    543         ptr3 = kmem_remote_alloc( rt_cxy , &req );
     540        order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
     541        ptr3 = kmem_remote_alloc( rt_cxy , order , AF_ZERO );
    544542
    545543        if( ptr3 == NULL ) return -1;
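
At every grdxt level the allocation order is computed as ix_width + 2 on a 32-bit kernel and ix_width + 3 on a 64-bit one, i.e. the index width plus log2(sizeof(void*)), so that the array of 2^ix_width pointers fits exactly in a block of 2^order bytes. A standalone check of that arithmetic (ix_width = 7 is an arbitrary example value):

    #include <stdio.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t ix_width = 7;
        uint32_t order    = ix_width + ( (sizeof(void*) == 4) ? 2 : 3 );

        // 2^order bytes must hold exactly 2^ix_width pointers
        printf("entries %u / block %u bytes / needed %zu bytes\n",
               1u << ix_width, 1u << order, sizeof(void*) * (1u << ix_width));
        return 0;
    }
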
  • trunk/kernel/libk/remote_barrier.c

    r671 r683  
    22 * remote_barrier.c -  POSIX barrier implementation.
    33 *
    4  * Author   Alain Greiner (2016,2017,2018,2019)
     4 * Author   Alain Greiner    (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    8484{
    8585    generic_barrier_t * gen_barrier_ptr;  // local pointer on generic barrier descriptor
    86     void              * barrier;          // local pointer on implementation barrier descriptor     
    87     kmem_req_t          req;              // kmem request
     86    void              * barrier;          // local pointer on impl barrier descriptor     
    8887
    8988    // get pointer on local process_descriptor
     
    9695
    9796    // allocate memory for generic barrier descriptor
    98     req.type   = KMEM_KCM;
    99     req.order  = bits_log2( sizeof(generic_barrier_t) );
    100     req.flags  = AF_ZERO | AF_KERNEL;
    101     gen_barrier_ptr = kmem_remote_alloc( ref_cxy , &req );
    102 
     97    gen_barrier_ptr = kmem_remote_alloc( ref_cxy,
     98                                         bits_log2(sizeof(generic_barrier_t)),
     99                                         AF_KERNEL );
    103100    if( gen_barrier_ptr == NULL )
    104101    {
     
    108105
    109106    // create implementation specific barrier descriptor
    110     if( attr == NULL )                                    // simple barrier implementation
     107    if( attr == NULL )                                    // simple barrier
    111108    {
    112109        // create simple barrier descriptor
    113110         barrier = simple_barrier_create( count );
    114 
    115         if( barrier == NULL ) return -1;
    116     }
    117     else                                                  // QDT barrier implementation
     111    }
     112    else                                                  // QDT barrier
    118113    {
    119114        uint32_t x_size   = attr->x_size;
     
    126121            printk("\n[ERROR] in %s : count(%d) != x_size(%d) * y_size(%d) * nthreads(%d)\n",
    127122            __FUNCTION__, count, x_size, y_size, nthreads );
     123            kmem_remote_free( ref_cxy,
     124                              gen_barrier_ptr,
     125                              bits_log2(sizeof(generic_barrier_t)) );
    128126            return -1;
    129127        }
     
    131129        // create DQT barrier descriptor
    132130        barrier = dqt_barrier_create( x_size , y_size , nthreads );
    133 
    134         if( barrier == NULL ) return -1;
     131    }
     132
     133    if( barrier == NULL )
     134    {
     135        printk("\n[ERROR] in %s : cannot create impl barrier\n", __FUNCTION__ );
     136        kmem_remote_free( ref_cxy,
     137                          gen_barrier_ptr,
     138                          bits_log2(sizeof(generic_barrier_t)) );
     139        return -1;
    135140    }
    136141
     
    157162void generic_barrier_destroy( xptr_t gen_barrier_xp )
    158163{
    159     kmem_req_t  req;              // kmem request
    160 
    161164    // get pointer on local process_descriptor
    162165    process_t * process = CURRENT_THREAD->process;
     
    191194    remote_busylock_release( lock_xp );
    192195
    193     // release memory allocated to barrier descriptor
    194     req.type          = KMEM_KCM;
    195     req.ptr           = gen_barrier_ptr;
    196     kmem_remote_free( ref_cxy , &req );
     196    // release memory allocated to generic barrier descriptor
     197    kmem_remote_free( gen_barrier_cxy,
     198                      gen_barrier_ptr,
     199                      bits_log2(sizeof(generic_barrier_t)) );
    197200
    198201}  // end generic_barrier_destroy()
     
    246249simple_barrier_t * simple_barrier_create( uint32_t  count )
    247250{
    248     kmem_req_t         req;
    249251    simple_barrier_t * barrier;
    250252
     
    258260
    259261    // allocate memory for simple barrier descriptor
    260     req.type   = KMEM_KCM;
    261     req.order  = bits_log2( sizeof(simple_barrier_t) );
    262     req.flags  = AF_ZERO | AF_KERNEL;
    263     barrier    = kmem_remote_alloc( ref_cxy , &req );
    264 
     262    barrier  = kmem_remote_alloc( ref_cxy,
     263                                  bits_log2(sizeof(simple_barrier_t)),
     264                                  AF_ZERO );
    265265    if( barrier == NULL )
    266266    {
     
    291291void simple_barrier_destroy( xptr_t barrier_xp )
    292292{
    293     kmem_req_t  req;
    294 
    295293    // get barrier cluster and local pointer
    296294    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
     
    298296
    299297    // release memory allocated for barrier descriptor
    300     req.type = KMEM_KCM;
    301     req.ptr  = barrier_ptr;
    302     kmem_remote_free( barrier_cxy , &req );
     298    kmem_remote_free( barrier_cxy,
     299                      barrier_ptr,
     300                      bits_log2(sizeof(simple_barrier_t)) );
    303301
    304302#if DEBUG_BARRIER_DESTROY
     
    471469    uint32_t        y;             // Y coordinate in QDT mesh
    472470    uint32_t        l;             // level coordinate
    473     kmem_req_t      req;           // kmem request
    474471
    475472    // compute number of DQT levels, depending on the mesh size
     
    478475
    479476// check x_size and y_size arguments
    480 assert( __FUNCTION__, (z <= 16) , "DQT mesh size larger than (16*16)\n");
     477assert( __FUNCTION__, (z <= 16),
     478"DQT mesh size larger than (16*16)\n");
    481479
    482480// check size of an array of 5 DQT nodes
    483 assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n");
     481assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ),
     482"array of DQT nodes larger than 512 bytes\n");
    484483
    485484// check size of DQT barrier descriptor
    486 assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n");
     485assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ),
     486"DQT barrier descriptor larger than 4 pages\n");
    487487
    488488    // get pointer on client thread and process descriptors
     
    502502
    503503    // 1. allocate 4 small pages for the DQT barrier descriptor in reference cluster
    504     req.type   = KMEM_PPM;
    505     req.order  = 2;                     // 4 small pages == 16 Kbytes                     
    506     req.flags  = AF_ZERO | AF_KERNEL;
    507     barrier    = kmem_remote_alloc( ref_cxy , &req );
    508 
     504    barrier    = kmem_remote_alloc( ref_cxy,
     505                                    CONFIG_PPM_PAGE_ORDER + 2,   // 4 small pages
     506                                    AF_ZERO );     
    509507    if( barrier == NULL )
    510508    {
     
    536534        {
    537535            cxy_t  cxy = HAL_CXY_FROM_XY( x , y );   // target cluster identifier
    538             xptr_t local_array_xp;                   // xptr of nodes array in cluster cxy
     536            xptr_t local_array_xp;                   // xptr on nodes array in cluster cxy
    539537
    540538            // allocate memory in existing clusters only
    541539            if( LOCAL_CLUSTER->cluster_info[x][y] )
    542540            {
    543                 req.type  = KMEM_KCM;
    544                 req.order = 9;                    // 512 bytes
    545                 req.flags = AF_ZERO | AF_KERNEL;
    546 
    547                 void * ptr = kmem_remote_alloc( cxy , &req );
     541                void * ptr = kmem_remote_alloc( cxy , 9 , AF_ZERO );  // 512 bytes
    548542
    549543                if( ptr == NULL )
     
    729723void dqt_barrier_destroy( xptr_t   barrier_xp )
    730724{
    731     kmem_req_t   req;                      // kmem request
    732725    uint32_t     x;
    733726    uint32_t     y;
    734 
    735727
    736728    // get DQT barrier descriptor cluster and local pointer
     
    767759                void  * buf       = GET_PTR( buf_xp );
    768760
    769 assert( __FUNCTION__, (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" );
    770 
    771                 req.type  = KMEM_KCM;
    772                 req.ptr   = buf;
    773                 kmem_remote_free( cxy , &req );
     761                kmem_remote_free( cxy , buf , 9 );    // 512 bytes
    774762
    775763#if DEBUG_BARRIER_DESTROY
     
    785773
    786774    // 2. release memory allocated for barrier descriptor in ref cluster
    787     req.type = KMEM_PPM;
    788     req.ptr  = barrier_ptr;
    789     kmem_remote_free( barrier_cxy , &req );
     775    kmem_remote_free( barrier_cxy,
     776                      barrier_ptr,
     777                      CONFIG_PPM_PAGE_ORDER + 2 );   // 4 small pages
    790778
    791779#if DEBUG_BARRIER_DESTROY
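
The DQT barrier allocations above rely on two fixed sizes: the barrier descriptor itself occupies 4 small pages (order CONFIG_PPM_PAGE_ORDER + 2, i.e. 16 Kbytes with the 4 Kbytes pages implied by the asserts), and each per-cluster array of 5 dqt_node_t is allocated with order 9 (512 bytes). A standalone sketch of that arithmetic, assuming CONFIG_PPM_PAGE_ORDER == 12:

    #include <stdio.h>

    #define CONFIG_PPM_PAGE_ORDER  12                     /* assumed : 4 Kbytes pages       */

    int main( void )
    {
        unsigned desc_order = CONFIG_PPM_PAGE_ORDER + 2;  /* 4 small pages                  */
        unsigned desc_bytes = 1u << desc_order;           /* 16384 == 0x4000 (assert limit) */
        unsigned node_bytes = 1u << 9;                    /* 5 dqt_node_t must fit in 512   */

        printf("descriptor : order %u / %u bytes\n", desc_order, desc_bytes);
        printf("nodes array: %u bytes per cluster\n", node_bytes);
        return 0;
    }
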
  • trunk/kernel/libk/remote_buf.c

    r671 r683  
    3434remote_buf_t * remote_buf_alloc( cxy_t  cxy )
    3535{
    36     kmem_req_t req;
    37 
    38     req.type  = KMEM_KCM;
    39     req.order = bits_log2( sizeof(remote_buf_t) );
    40     req.flags = AF_ZERO;
    41     return kmem_remote_alloc( cxy , &req );
     36    return kmem_remote_alloc( cxy,
     37                              bits_log2(sizeof(remote_buf_t)),
     38                              AF_ZERO );
    4239}
    4340
     
    5047assert( __FUNCTION__ , (order < 32) , "order cannot be larger than 31" );
    5148
    52     kmem_req_t     req;
    5349    uint8_t      * data;
    5450
     
    5753
    5854    // allocate the data buffer
    59     if( order >= CONFIG_PPM_PAGE_SHIFT )  // use KMEM_PPM
    60     {
    61         req.type  = KMEM_PPM;
    62         req.order = order - CONFIG_PPM_PAGE_SHIFT;
    63         req.flags = AF_NONE;
    64         data = kmem_remote_alloc( buf_cxy , &req );
    65 
    66         if( data == NULL )  return -1;
    67     }
    68     else                                     // use KMEM_KCM
    69     {
    70         req.type  = KMEM_KCM;
    71         req.order = order;
    72         req.flags = AF_NONE;
    73         data = kmem_remote_alloc( buf_cxy , &req );
    74 
    75         if( data == NULL )  return -1;
    76     }
     55    data = kmem_remote_alloc( buf_cxy , order , AF_NONE );
     56
     57    if( data == NULL )  return -1;
    7758
    7859    // initialize buffer descriptor
     
    9071void remote_buf_release_data( xptr_t  buf_xp )
    9172{
    92     kmem_req_t     req;
    9373
    9474assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" );
     
    10282
    10383    // release memory allocated for data buffer  if required
    104     if( data_ptr != NULL )
    105     {
    106         if( order >= CONFIG_PPM_PAGE_SHIFT )          // use KMEM_PPM
    107         {
    108             req.type  = KMEM_PPM;
    109             req.ptr   = data_ptr;
    110             kmem_remote_free( buf_cxy , &req );
    111         }
    112         else                                          // use KMEM_KCM
    113         {
    114             req.type  = KMEM_KCM;
    115             req.ptr   = data_ptr;
    116             kmem_remote_free( buf_cxy , &req );
    117         }
    118     }
     84    if( data_ptr != NULL )  kmem_remote_free( buf_cxy , data_ptr , order );
     85 
    11986}  // end remote_buf_release_data()
    12087
     
    12592assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" );
    12693
    127     kmem_req_t   req;
    128 
    12994    remote_buf_t * buf_ptr = GET_PTR( buf_xp );
    13095    cxy_t          buf_cxy = GET_CXY( buf_xp );
     
    13499
    135100    // release remote_buf descriptor
    136     req.type = KMEM_KCM;
    137     req.ptr  = buf_ptr;
    138     kmem_remote_free( buf_cxy , &req );
     101    kmem_remote_free( buf_cxy , buf_ptr , bits_log2(sizeof(remote_buf_t)) );
    139102
    140103}  // end remote_buf_destroy()
     
    404367}  // end remote_buf_status()
    405368
    406 
     369///////////////////////////////////////////////
     370void remote_buf_display( const char * func_str,
     371                         xptr_t       buf_xp,
     372                         uint32_t     nbytes,
     373                         uint32_t     offset )
     374{
     375    if( nbytes > 256 )
     376    {
      377        printk("\n[WARNING] in %s : cannot display more than 256 bytes\n", __FUNCTION__ );
     378        nbytes = 256;
     379    }
     380
     381    uint8_t        string[128];          // for header
     382    uint8_t        local_data[256];      // local data buffer
     383
     384    cxy_t          cxy = GET_CXY( buf_xp );
     385    remote_buf_t * ptr = GET_PTR( buf_xp );
     386
     387    uint32_t   order = hal_remote_l32( XPTR( cxy , &ptr->order ));
     388    uint32_t   rid   = hal_remote_l32( XPTR( cxy , &ptr->rid ));
     389    uint32_t   wid   = hal_remote_l32( XPTR( cxy , &ptr->wid ));
     390    uint32_t   sts   = hal_remote_l32( XPTR( cxy , &ptr->sts ));
     391    uint8_t  * data  = hal_remote_lpt( XPTR( cxy , &ptr->data ));
     392
     393    // make a local copy of data buffer
     394    hal_remote_memcpy( XPTR( local_cxy , local_data ),
     395                       XPTR( cxy , data + offset ),
     396                       nbytes );
     397
     398    // build header
     399    snprintk( (char*)string , 128 ,
     400    "in %s remote buffer [%x,%x] : size %d / rid %d / wid %d / sts %d ",
     401    func_str , cxy , ptr , 1<<order , rid , wid , sts );
     402
     403    // display buffer on TXT0
     404    putb( (char*)string , local_data , nbytes );
     405
     406}  // end remote_buf_display()
  • trunk/kernel/libk/remote_buf.h

    r671 r683  
    176176uint32_t remote_buf_status( xptr_t  buf_xp );
    177177
     178/************************************************************************************
     179 * This debug function displays on the kernel terminal the current state of a remote
     180 * buffer identified by the <buf_xp> argument : order / rid / wid / sts.
      181 * If the <nbytes> argument is not null, and not larger than 256, it displays up to
     182 * 256 bytes of the data buffer, from <offset> to (offset + nbytes -1).
     183 ************************************************************************************
     184 * @ func_str  : [in] calling function name (displayed in header). 
      185 * @ buf_xp    : [in] extended pointer on the remote buffer descriptor.
     186 * @ nbytes    : [in] number of data bytes to display.
     187 * @ offset    : [in] index of first displayed byte in data buffer.
     188 ***********************************************************************************/
     189void remote_buf_display( const char * func_str,
     190                         xptr_t       buf_xp,
     191                         uint32_t     nbytes,
     192                         uint32_t     offset );
     193
    178194#endif  /* _REMOTE_BUFFER_H_ */
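
A minimal call sketch for this debug helper, assuming the caller already holds a valid buf_xp; the 64-byte window starting at offset 0 is an arbitrary example:

    // dump order / rid / wid / sts and the first 64 data bytes on TXT0
    remote_buf_display( __FUNCTION__ , buf_xp , 64 , 0 );
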
  • trunk/kernel/libk/remote_condvar.c

    r635 r683  
    22 * remote_condvar.c - remote kernel condition variable implementation.
    33 *
    4  * Authors     Alain Greiner (2016,2017,2018,2019)
     4 * Authors     Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    8686{
    8787    remote_condvar_t * condvar_ptr;
    88     kmem_req_t         req;   
    8988
    9089    // get pointer on local process descriptor
     
    9897    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    9998
    100     req.type    = KMEM_KCM;
    101     req.order   = bits_log2( sizeof(remote_condvar_t) );
    102     req.flags   = AF_ZERO | AF_KERNEL;
    103     condvar_ptr = kmem_alloc( &req );
     99    // allocate memory for condvar descriptor
     100    condvar_ptr = kmem_alloc( bits_log2(sizeof(remote_condvar_t)) , AF_ZERO );
    104101
    105102    if( condvar_ptr == NULL )
     
    130127void remote_condvar_destroy( xptr_t condvar_xp )
    131128{
    132     kmem_req_t  req;
    133 
    134129    // get pointer on local process descriptor
    135130    process_t * process = CURRENT_THREAD->process;
     
    162157
    163158    // release memory allocated for condvar descriptor
    164     req.type = KMEM_KCM;
    165     req.ptr  = condvar_ptr;
    166     kmem_remote_free( ref_cxy , &req );
     159    kmem_remote_free( ref_cxy , condvar_ptr , bits_log2(sizeof(remote_condvar_t)) );
    167160
    168161}  // end remote_convar_destroy()
  • trunk/kernel/libk/remote_condvar.h

    r635 r683  
    22 * remote_condvar.h: POSIX condition variable definition.     
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/libk/remote_fifo.c

    r657 r683  
    4242        fifo->wr_id     = 0;
    4343        fifo->rd_id     = 0;
    44     for( slot = 0 ; slot < CONFIG_REMOTE_FIFO_SLOTS ; slot++ )
     44    for( slot = 0 ; slot < CONFIG_RPC_FIFO_SLOTS ; slot++ )
    4545    {
    4646        fifo->valid[slot] = 0;
     
    6969
    7070    // wait until allocated slot is empty in remote FIFO
    71     // max retry = CONFIG_REMOTE_FIFO_MAX_ITERATIONS 
     71    // max retry = CONFIG_RPC_FIFO_MAX_ITERATIONS 
    7272    // return error if watchdog is reached
    7373    while( 1 )
    7474    {
    7575        // return error if contention detected by watchdog
    76         if( watchdog > CONFIG_REMOTE_FIFO_MAX_ITERATIONS )  return EBUSY;
     76        if( watchdog > CONFIG_RPC_FIFO_MAX_ITERATIONS )  return EBUSY;
    7777
    7878        // read remote rd_id value
     
    8484
    8585        // exit waiting loop as soon as fifo not full
    86         if ( nslots < CONFIG_REMOTE_FIFO_SLOTS )  break;
     86        if ( nslots < CONFIG_RPC_FIFO_SLOTS )  break;
    8787       
    8888        // retry later if fifo full:
     
    9797
    9898    // compute actual write slot pointer
    99     ptw = wr_id % CONFIG_REMOTE_FIFO_SLOTS;
     99    ptw = wr_id % CONFIG_RPC_FIFO_SLOTS;
    100100
    101101    // copy item to fifo
     
    123123       
    124124    // compute actual read slot pointer
    125         uint32_t ptr = rd_id % CONFIG_REMOTE_FIFO_SLOTS;
     125        uint32_t ptr = rd_id % CONFIG_RPC_FIFO_SLOTS;
    126126       
    127127        // wait slot filled by the writer
     
    158158    else                 nslots = (0xFFFFFFFF - rd_id) + wr_id;
    159159
    160     return ( nslots >= CONFIG_REMOTE_FIFO_SLOTS );
     160    return ( nslots >= CONFIG_RPC_FIFO_SLOTS );
    161161}
    162162
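
The full-detection logic renamed above counts the occupied slots with a wrap-around safe subtraction on the 32-bit ticket counters: nslots = wr_id - rd_id when wr_id >= rd_id, and (0xFFFFFFFF - rd_id) + wr_id otherwise, the FIFO being considered full when nslots reaches CONFIG_RPC_FIFO_SLOTS. A standalone model of that test (the slot count of 4 is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define CONFIG_RPC_FIFO_SLOTS  4

    /* same computation as the FIFO full-test in remote_fifo.c */
    static int fifo_is_full( uint32_t wr_id , uint32_t rd_id )
    {
        uint32_t nslots;
        if( wr_id >= rd_id ) nslots = wr_id - rd_id;
        else                 nslots = (0xFFFFFFFF - rd_id) + wr_id;
        return ( nslots >= CONFIG_RPC_FIFO_SLOTS );
    }

    int main( void )
    {
        printf("empty       -> full = %d\n", fifo_is_full( 10 , 10 ));          /* 0 */
        printf("4 pending   -> full = %d\n", fifo_is_full( 14 , 10 ));          /* 1 */
        printf("wrap-around -> full = %d\n", fifo_is_full( 1 , 0xFFFFFFFEu ));  /* 0 */
        return 0;
    }
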
  • trunk/kernel/libk/remote_fifo.h

    r563 r683  
    3636 * that is used for - RPC based - inter cluster communications.
    3737 * Each FIF0 slot can contain one 64 bits integer (or one extended pointer).
    38  * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.
     38 * The number of slots is defined by the CONFIG_RPC_FIFO_SLOTS parameter.
    3939 * - The write accesses are implemented using a lock-free algorithm, as it uses
    4040 *   a ticket based mechanism to handle concurrent access between multiple writers.
     
    4545 *   and RPC threads cannot have local index LTID = 0.
    4646*
    47  * WARNING : Each FIFO requires 12 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.
     47 * WARNING : Each FIFO requires 12 + (12 * CONFIG_RPC_FIFO_SLOTS) bytes.
    4848 ***********************************************************************************/
    4949
     
    5353        volatile uint32_t  wr_id;                            /*! write slot index      */
    5454        volatile uint32_t  rd_id;                            /*! read  slot index      */
    55     volatile uint32_t  valid[CONFIG_REMOTE_FIFO_SLOTS];  /*! empty slot if 0       */
    56         uint64_t           data[CONFIG_REMOTE_FIFO_SLOTS];   /*! fifo slot content     */
     55    volatile uint32_t  valid[CONFIG_RPC_FIFO_SLOTS];  /*! empty slot if 0       */
     56        uint64_t           data[CONFIG_RPC_FIFO_SLOTS];   /*! fifo slot content     */
    5757}
    5858remote_fifo_t;
     
    8484 * the slot is empty, using a descheduling policy without blocking if required.
    8585 * It implements a watchdog, returning when the item has been successfully
    86  * registered, or after CONFIG_REMOTE_FIFO_MAX_ITERATIONS failures.   
     86 * registered, or after CONFIG_RPC_FIFO_MAX_ITERATIONS failures.   
    8787 ************************************************************************************
    8888 * @ fifo    : extended pointer to the remote fifo.
  • trunk/kernel/libk/remote_mutex.c

    r635 r683  
    22 * remote_mutex.c - POSIX mutex implementation.
    33 *
    4  * Authors   Alain   Greiner (2016,2017,2018,2019)
      4 * Authors   Alain   Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    8585{
    8686    remote_mutex_t * mutex_ptr;
    87     kmem_req_t       req;   
    8887
    8988    // get pointer on local process descriptor
     
    9897
    9998    // allocate memory for mutex descriptor in reference cluster
    100     req.type    = KMEM_KCM;
    101     req.order   = bits_log2( sizeof(remote_mutex_t) );
    102     req.flags   = AF_ZERO | AF_KERNEL;
    103     mutex_ptr   = kmem_remote_alloc( ref_cxy , &req );
     99    mutex_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_mutex_t)) , AF_ZERO );
    104100
    105101    if( mutex_ptr == NULL )
     
    145141void remote_mutex_destroy( xptr_t mutex_xp )
    146142{
    147     kmem_req_t  req;
    148 
    149143    // get pointer on local process descriptor
    150144    process_t * process = CURRENT_THREAD->process;
     
    171165
    172166    // release memory allocated for mutex descriptor
    173     req.type = KMEM_KCM;
    174     req.ptr  = mutex_ptr;
    175     kmem_remote_free( mutex_cxy , &req );
     167    kmem_remote_free( mutex_cxy , mutex_ptr , bits_log2(sizeof(remote_mutex_t)) );
    176168
    177169}  // end remote_mutex_destroy()
  • trunk/kernel/libk/remote_sem.c

    r671 r683  
    22 * remote_sem.c - POSIX unnamed semaphore implementation.
    33 *
    4  * Author   Alain Greiner  (2016,2017,2018,2019)
     4 * Author   Alain Greiner  (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    8686                           uint32_t   value )
    8787{
    88     kmem_req_t     req;   
    8988    remote_sem_t * sem_ptr;
    9089
     
    10099
    101100    // allocate memory for new semaphore in reference cluster
    102     req.type  = KMEM_KCM;
    103     req.order = bits_log2( sizeof(remote_sem_t) );
    104     req.flags = AF_ZERO | AF_KERNEL;
    105     sem_ptr   = kmem_remote_alloc( ref_cxy, &req );
     101    sem_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_sem_t)) , AF_ZERO );
    106102
    107103    if( sem_ptr == NULL )
     
    144140void remote_sem_destroy( xptr_t sem_xp )
    145141{
    146     kmem_req_t  req;
    147 
    148142    // get pointer on local process descriptor
    149143    process_t * process = CURRENT_THREAD->process;
     
    176170
    177171    // release memory allocated for semaphore descriptor
    178     req.type = KMEM_KCM;
    179     req.ptr  = sem_ptr;
    180     kmem_remote_free( sem_cxy , &req );
     172    kmem_remote_free( sem_cxy , sem_ptr , bits_log2(sizeof(remote_sem_t)) );
    181173
    182174}  // end remote_sem_destroy()
  • trunk/kernel/libk/remote_sem.h

    r581 r683  
    22 * remote_sem.h - POSIX unnamed semaphore definition.
    33 *
    4  * Author   Alain Greiner (2016,2017,2018)
     4 * Author   Alain Greiner    (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/libk/user_dir.c

    r671 r683  
    22 * user_dir.c - kernel DIR related operations implementation.
    33 *
    4  * Authors   Alain   Greiner (2016,2017,2018,2019)
     4 * Authors   Alain   Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    105105    list_entry_t    root;              // root of temporary list of allocated pages
    106106    uint32_t        page_id;           // page index in list of physical pages
    107     kmem_req_t      req;               // kmem request descriptor
    108107    ppn_t           fake_ppn;          // unused, but required by hal_gptlock_pte()
    109108    uint32_t        fake_attr;         // unused, but required by hal_gptlock_pte()
    110109    error_t         error;
     110
     111#if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR
     112uint32_t   cycle = (uint32_t)hal_get_cycles();
     113thread_t * this  = CURRENT_THREAD;
     114#endif
    111115
    112116    // get cluster, local pointer, and pid of reference process
     
    115119    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
    116120
    117 #if DEBUG_USER_DIR
    118 uint32_t cycle = (uint32_t)hal_get_cycles();
    119 thread_t * this = CURRENT_THREAD;
    120 if( cycle > DEBUG_USER_DIR )
     121#if DEBUG_USER_DIR_CREATE
     122if( DEBUG_USER_DIR_CREATE < cycle )
    121123printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
    122124__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
     
    133135
    134136    // allocate memory for a local user_dir descriptor
    135     req.type  = KMEM_KCM;
    136     req.order = bits_log2( sizeof(user_dir_t) );
    137     req.flags = AF_ZERO | AF_KERNEL;
    138     dir       = kmem_alloc( &req );
     137    dir = kmem_alloc( bits_log2(sizeof(user_dir_t)) , AF_ZERO );
    139138
    140139    if( dir == NULL )
    141140    {
    142         printk("\n[ERROR] in %s : cannot allocate user_dir_t in cluster %x\n",
    143         __FUNCTION__, local_cxy );
     141
     142#if DEBUG_USER_DIR_ERROR
     143printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate user_dir_t in cluster %x / cycle %d\n",
     144__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     145#endif
    144146        return NULL;
    145147    }
    146148
    147     // Build and initialize the dirent array as a list of pages.
    148     // For each iteration in this while loop:
     149    // First loop to build and initialize the dirent array
     150    // as a temporary list of pages. For each iteration :
    149151    // - allocate one physical 4 Kbytes (64 dirent slots)
    150152    // - call the relevant FS specific function to scan the directory mapper,
     
    162164    {
    163165        // allocate one physical page
    164         req.type  = KMEM_PPM;
    165         req.order = 0;
    166         req.flags = AF_ZERO;
    167         base      = kmem_alloc( &req );
     166        base = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_ZERO );
    168167
    169168        if( base == NULL )
    170169        {
    171             printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n",
    172             __FUNCTION__, ref_cxy );
     170
     171#if DEBUG_USER_DIR_ERROR
     172printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate page in cluster %x / cycle %d\n",
     173__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     174#endif
    173175            goto user_dir_create_failure;
    174176        }
     
    184186        if( error )
    185187        {
    186             printk("\n[ERROR] in %s : cannot initialise dirent array in cluster %x\n",
    187             __FUNCTION__, ref_cxy );
     188
     189#if DEBUG_USER_DIR_ERROR
     190printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize dirent array in cluster %x / cycle %d\n",
     191__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     192#endif
    188193            goto user_dir_create_failure;
    189194        }
     
    204209    } // end while
    205210       
    206 #if DEBUG_USER_DIR
    207 if( cycle > DEBUG_USER_DIR )
     211#if DEBUG_USER_DIR_CREATE
     212if( DEBUG_USER_DIR_CREATE < cycle )
    208213printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n",
    209214__FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle );
     
    241246    if( vseg == NULL )
    242247    {
    243         printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n",
    244         __FUNCTION__, ref_cxy);
     248
     249#if DEBUG_USER_DIR_ERROR
     250printk("\n[ERROR] in %s : thread[%x,%x] cannot create vseg in cluster %x / cycle %d\n",
     251__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
     252#endif
    245253        goto user_dir_create_failure;
    246254    }
    247255
    248 #if DEBUG_USER_DIR
    249 if( cycle > DEBUG_USER_DIR )
     256#if DEBUG_USER_DIR_CREATE
     257if( DEBUG_USER_DIR_CREATE < cycle )
    250258printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
    251259__FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
     
    269277    vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
    270278
    271     // scan the list of allocated physical pages to map
     279    // Second loop on the allocated physical pages to map
    272280    // all physical pages in the reference process GPT
     281    // The pages are mapped in the user process GPT, but
     282    // are removed from the temporary list
     283
    273284    page_id = 0;
     285
    274286    while( list_is_empty( &root ) == false )
    275287    {
     
    290302        if( error )
    291303        {
    292             printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
    293             __FUNCTION__, vpn );
    294 
     304
     305#if DEBUG_USER_DIR_ERROR
     306printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn %x in cluster %x / cycle %d\n",
     307__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle );
     308#endif
    295309            // delete the vseg
    296310            intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) );
     
    298312         
    299313            // release the user_dir descriptor
    300             req.type = KMEM_KCM;
    301             req.ptr  = dir;
    302             kmem_free( &req );
     314            kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
    303315            return NULL;
    304316        }
     
    310322                         ppn );
    311323
    312 #if DEBUG_USER_DIR 
    313 if( cycle > DEBUG_USER_DIR )
     324#if DEBUG_USER_DIR_CREATE
     325if( DEBUG_USER_DIR_CREATE < cycle )
    314326printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
    315327__FUNCTION__, this->process->pid, this->trdid, vpn + page_id, ppn );
     
    329341    dir->current = 0;
    330342    dir->entries = total_dirents;
    331     dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_SHIFT);
     343    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_ORDER);
    332344
    333345    // build extended pointers on root and lock of user_dir xlist in ref process
     
    347359    remote_queuelock_release( lock_xp );
    348360
    349 #if DEBUG_USER_DIR
    350 cycle = (uint32_t)hal_get_cycles();
    351 if( cycle > DEBUG_USER_DIR )
     361#if DEBUG_USER_DIR_CREATE
     362if( DEBUG_USER_DIR_CREATE < cycle )
    352363printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
    353364__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
     
    358369user_dir_create_failure:
    359370
    360     // release local user_dir_t structure
    361     req.type = KMEM_KCM;
    362     req.ptr  = dir;
    363     kmem_free( &req );
    364 
    365     // release local physical pages
     371    // release user_dir_t structure
     372    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
     373
     374    // release physical pages
    366375    while( list_is_empty( &root ) == false )
    367376    {
     377        // get page descriptor
    368378        page = LIST_FIRST( &root , page_t , list );
    369379
     
    371381        base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );
    372382 
    373         req.type  = KMEM_PPM;
    374         req.ptr   = base;
    375         kmem_free( &req );
     383        // release the page
     384        kmem_free( base , CONFIG_PPM_PAGE_ORDER );
    376385    }
    377386
     
    402411    cluster = LOCAL_CLUSTER;
    403412
     413#if DEBUG_USER_DIR_DESTROY
     414uint32_t cycle = (uint32_t)hal_get_cycles();
     415#endif
     416
    404417    // get cluster, local pointer, and PID of reference user process
    405418    ref_cxy = GET_CXY( ref_xp );
     
    407420    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
    408421
    409 #if DEBUG_USER_DIR
    410 uint32_t cycle = (uint32_t)hal_get_cycles();
    411 if( cycle > DEBUG_USER_DIR )
     422#if DEBUG_USER_DIR_DESTROY
     423if( DEBUG_USER_DIR_DESTROY < cycle )
    412424printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n",
    413425__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, ref_pid, cycle );
     
    475487        hal_atomic_add( &responses , 1 );
    476488
    477 #if (DEBUG_USER_DIR & 1)
    478 uint32_t cycle = (uint32_t)hal_get_cycles();
    479 if( cycle > DEBUG_USER_DIR )
     489#if (DEBUG_USER_DIR_DESTROY & 1)
     490if(  DEBUG_USER_DIR_DESTROY < cycle )
    480491printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
    481492__FUNCTION__, this->process->pid, this->trdid, process_cxy );
     
    496507
    497508    // release local user_dir_t structure
    498     kmem_req_t  req;
    499     req.type = KMEM_KCM;
    500     req.ptr  = dir;
    501     kmem_free( &req );
    502 
    503 #if DEBUG_USER_DIR
     509    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
     510
     511#if DEBUG_USER_DIR_DESTROY
    504512cycle = (uint32_t)hal_get_cycles();
    505 if( cycle > DEBUG_USER_DIR )
     513if( DEBUG_USER_DIR_DESTROY < cycle )
    506514printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n",
    507515__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, cycle );
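
user_dir_create() sizes the dirent area one 4 Kbytes page at a time (64 dirent slots per page, as stated in the first loop above) and publishes the mapping through dir->ident = vpn_base << CONFIG_PPM_PAGE_ORDER. A standalone sketch of both computations, assuming 4 Kbytes pages and the 64-slot packing noted in the comment (the dirent count and vpn_base values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define CONFIG_PPM_PAGE_ORDER  12            /* assumed : 4 Kbytes pages       */
    #define DIRENTS_PER_PAGE       64            /* 64 dirent slots per page       */

    int main( void )
    {
        uint32_t total_dirents = 150;
        uint32_t npages        = (total_dirents + DIRENTS_PER_PAGE - 1) / DIRENTS_PER_PAGE;
        uint32_t vpn_base      = 0x300;
        intptr_t ident         = (intptr_t)vpn_base << CONFIG_PPM_PAGE_ORDER;

        printf("%u dirents -> %u page(s) / ident = %lx\n",
               total_dirents, npages, (unsigned long)ident);
        return 0;
    }
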
  • trunk/kernel/libk/user_dir.h

    r651 r683  
    22 * user_dir.h -  DIR related operations definition.
    33 *
    4  * Authors   Alain Greiner   (2016,2017,2018,2019)
     4 * Authors   Alain Greiner   (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/kcm.c

    r672 r683  
    3636#include <kcm.h>
    3737
     38///////////////////////////////////////////////////////////////////////////////////////////
     39//         global variables
     40///////////////////////////////////////////////////////////////////////////////////////////
     41
     42extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     43
    3844
    3945/////////////////////////////////////////////////////////////////////////////////////
     
    4248
    4349//////////////////////////////////////////////////////////////////////////////////////
    44 // This static function must be called by a local thread.
     50// This static function is called by the kcm_alloc() function.
    4551// It returns a pointer on a block allocated from an active kcm_page.
    4652// It makes a panic if no block is available in the selected page.
     
    5561{
    5662    // initialise variables
    57     uint32_t size   = 1 << kcm->order;
    58     uint32_t max    = kcm->max_blocks;
     63    uint32_t order  = kcm->order;
    5964    uint32_t count  = kcm_page->count;
    6065    uint64_t status = kcm_page->status;
    6166
    62 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );
     67// check kcm page not full
     68assert( __FUNCTION__, (count < 63) ,
     69"kcm_page should not be full / cxy %x / order %d / count %d", local_cxy, order, count );
    6370
    6471    uint32_t index  = 1;
     
    6774        // allocate first free block in kcm_page, update status,
    6875    // and count , compute index of allocated block in kcm_page
    69     while( index <= max )
     76    while( index <= 63 )
    7077    {
    7178        if( (status & mask) == 0 )   // block found
     
    8188    }
    8289
    83     // change the page list if found block is the last
    84     if( count == max-1 )
     90    // switch page to full if last block
     91    if( (count + 1) == 63 )
    8592    {
    8693                list_unlink( &kcm_page->list);
     
    9299
    93100        // compute return pointer
    94         void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
    95 
    96 #if DEBUG_KCM
    97 thread_t * this  = CURRENT_THREAD;
    98 uint32_t   cycle = (uint32_t)hal_get_cycles();
    99 if( DEBUG_KCM < cycle )
    100 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n",
    101 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle );
    102 #endif
     101        void * ptr = (void *)((intptr_t)kcm_page + (index << order));
    103102
    104103        return ptr;
     
    107106
    108107/////////////////////////////////////////////////////////////////////////////////////
    109 // This private static function must be called by a local thread.
     108// This static function is called by the kcm_free() function.
    110109// It releases a previously allocated block to the relevant kcm_page.
    111110// It makes a panic if the released block is not allocated in this page.
     
    121120{
    122121    // initialise variables
    123     uint32_t max    = kcm->max_blocks;
    124     uint32_t size   = 1 << kcm->order;
     122    uint32_t order  = kcm->order;
    125123    uint32_t count  = kcm_page->count;
    126124    uint64_t status = kcm_page->status;
    127125   
    128         // compute block index from block pointer
    129         uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     126        // compute block index from block pointer and kcm_page pointer
     127        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order;
    130128
    131129    // compute mask in bit vector
     
    136134        printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
    137135        __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page );
    138         printk("   status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
    139136        kcm_remote_display( local_cxy , kcm );
    140137        return;
     
    145142        kcm_page->count  = count - 1;
    146143
    147         // change the page mode if page was full
    148         if( count == max )
     144        // switch page to active if it was full
     145        if( count == 63 )
    149146        {
    150147                list_unlink( &kcm_page->list );
     
    155152        }
    156153
    157 #if DEBUG_KCM
    158 thread_t * this  = CURRENT_THREAD;
    159 uint32_t   cycle = (uint32_t)hal_get_cycles();
    160 if( DEBUG_KCM < cycle )
    161 printk("\n[%s] thread[%x,%x] block %x / page %x / size %d / count %d / cycle %d\n",
    162 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );
    163 #endif
    164 
    165154}  // kcm_put_block()
    166155
    167156/////////////////////////////////////////////////////////////////////////////////////
    168 // This static function must be called by a local thread.
    169 // It returns one non-full kcm_page with the following policy :
     157// This static function  returns one non-full kcm_page with the following policy :
    170158// - if the "active_list" is non empty, it returns the first "active" page,
    171159//   without modifying the KCM state.
     
    188176    else                            // allocate a new page from PPM
    189177        {
    190         // get one 4 Kbytes page from local PPM
    191         page_t * page = ppm_alloc_pages( 0 );
     178        // get KCM order
     179        uint32_t order = kcm->order;
     180
     181        // get one kcm_page from  PPM
     182        page_t * page = ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER );
    192183
    193184            if( page == NULL )
    194185            {
    195                     printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    196                 __FUNCTION__ , local_cxy );
    197 
     186
     187#if DEBUG_KCM_ERROR
     188printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     189__FUNCTION__ , local_cxy );
     190#endif
    198191                    return NULL;
    199192        }
     
    202195            xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
    203196
    204         // get local pointer on kcm_page
     197        // get local pointer on kcm_page 
    205198            kcm_page = GET_PTR( base_xp );
    206199
     
    225218{
    226219
    227 assert( __FUNCTION__, ((order > 5) && (order < 12)) , "order must be in [6,11]" );
    228 
    229 assert( __FUNCTION__, (CONFIG_PPM_PAGE_SHIFT == 12) , "check status bit_vector width" );
     220// check argument
     221assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER),
     222"order argument %d too large", order );
     223
     224assert( __FUNCTION__, (order >= CONFIG_CACHE_LINE_ORDER),
     225"order argument %d too small", order );
    230226
    231227        // initialize lock
     
    238234        list_root_init( &kcm->active_root );
    239235
    240         // initialize order and max_blocks
    241         kcm->order      = order;
    242     kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1;
     236        // initialize order
     237        kcm->order = order;
    243238 
    244239#if DEBUG_KCM
    245 thread_t * this  = CURRENT_THREAD;
    246 uint32_t   cycle = (uint32_t)hal_get_cycles();
    247 if( DEBUG_KCM < cycle )
    248 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n",
    249 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks );
     240if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) )
     241printk("\n[%s] cxy %x / order %d\n",
     242__FUNCTION__, local_cxy, order );
    250243#endif
    251244
     
    287280void * kcm_alloc( uint32_t order )
    288281{
    289     kcm_t      * kcm_ptr;
     282    kcm_t      * kcm;
    290283        kcm_page_t * kcm_page;
    291         void       * block_ptr;
    292 
    293    // min block size is 64 bytes
    294     if( order < 6 ) order = 6;
    295 
    296 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );
     284        void       * block;
     285
     286// check argument
     287assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER),
     288"order argument %d too large", order );
     289
     290#if DEBUG_KCM
     291uint32_t cycle = (uint32_t)hal_get_cycles();
     292#endif
     293
     294    // smallest block size is a cache line
     295    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
    297296
    298297    // get local pointer on relevant KCM allocator
    299     kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
     298    kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
    300299
    301300    // build extended pointer on local KCM lock
    302     xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
     301    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );
    303302
    304303        // get KCM lock
     
    306305
    307306    // get a non-full kcm_page
    308     kcm_page = kcm_get_page( kcm_ptr );
    309 
    310 #if DEBUG_KCM
    311 thread_t * this  = CURRENT_THREAD;
    312 uint32_t   cycle = (uint32_t)hal_get_cycles();
    313 if( DEBUG_KCM < cycle )
    314 {
    315 printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n",
    316 __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr,
    317 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
    318 kcm_remote_display( local_cxy , kcm_ptr );
    319 }
    320 #endif
     307    kcm_page = kcm_get_page( kcm );
    321308
    322309    if( kcm_page == NULL )
     
    326313        }
    327314
    328         // get a block from selected active page
    329         block_ptr = kcm_get_block( kcm_ptr , kcm_page );
     315#if DEBUG_KCM
     316if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     317printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     318"    page %x / status [%x,%x] / count %d\n",
     319__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     320kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     321#endif
     322
     323        // allocate a block from selected active page
     324        block = kcm_get_block( kcm , kcm_page );
     325
     326#if DEBUG_KCM
     327if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     328printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     329"    page %x / status [%x,%x] / count %d\n",
     330__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     331kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     332#endif
    330333
    331334        // release lock
    332335        remote_busylock_release( lock_xp );
    333336
    334 #if DEBUG_KCM
    335 if( DEBUG_KCM < cycle )
    336 printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n",
    337 __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr,
    338 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
    339 #endif
    340 
    341         return block_ptr;
     337        return block;
    342338
    343339}  // end kcm_alloc()
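
With the new layout a kcm_page always contains 64 blocks of 2^order bytes: block 0 is reserved for the kcm_page descriptor itself, blocks 1 to 63 are allocatable and tracked in the 64-bit status word, and the whole kcm_page spans 2^(order+6) bytes, which is what ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER ) and the kcm_page_mask used by kcm_free() rely on. A standalone toy model of that bookkeeping (the status value, base address and order are arbitrary, and the initial mask is assumed to start at block 1, matching the index initialisation in kcm_get_block()):

    #include <stdio.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t order  = 6;                      /* 64-byte blocks (one cache line) */
        uint64_t status = 0xFULL;                 /* blocks 0..3 already allocated   */
        intptr_t base   = 0x100000;               /* pretend kcm_page base address   */

        /* scan blocks 1..63 for the first free bit in the status word */
        uint32_t index = 1;
        uint64_t mask  = 1ULL << 1;
        while( index <= 63 )
        {
            if( (status & mask) == 0 ) break;
            index++;
            mask <<= 1;
        }

        printf("first free block : index %u / address %lx / kcm_page spans %u bytes\n",
               index, (unsigned long)(base + (index << order)), 1u << (order + 6));
        return 0;
    }
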
    344340
    345 /////////////////////////////////
    346 void kcm_free( void * block_ptr )
    347 {
    348     kcm_t      * kcm_ptr;
     341///////////////////////////////
     342void kcm_free( void    * block,
     343               uint32_t  order )
     344{
     345    kcm_t      * kcm;
    349346        kcm_page_t * kcm_page;
    350347
    351348// check argument
    352 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );
     349assert( __FUNCTION__, (block != NULL),
     350"block pointer cannot be NULL" );
     351
     352#if DEBUG_KCM
     353uint32_t cycle = (uint32_t)hal_get_cycles();
     354#endif
     355
     356    // smallest block size is a cache line
     357    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
     358
     359    // get local pointer on relevant KCM allocator
     360    kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
    353361
    354362    // get local pointer on KCM page
    355         kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
    356 
    357     // get local pointer on KCM descriptor
    358         kcm_ptr = kcm_page->kcm;
    359 
    360 #if DEBUG_KCM
    361 thread_t * this  = CURRENT_THREAD;
    362 uint32_t   cycle = (uint32_t)hal_get_cycles();
    363 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
    364 {
    365 printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n",
    366 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr,
    367 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
    368 kcm_remote_display( local_cxy , kcm_ptr );
    369 }
    370 #endif
     363    intptr_t kcm_page_mask = (1 << (order + 6)) - 1;
     364        kcm_page = (kcm_page_t *)((intptr_t)block & ~kcm_page_mask);
    371365
    372366    // build extended pointer on local KCM lock
    373     xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
     367    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );
    374368
    375369        // get lock
    376370        remote_busylock_acquire( lock_xp );
    377371
    378         // release block
    379         kcm_put_block( kcm_ptr , kcm_page , block_ptr );
     372#if DEBUG_KCM
     373if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
      374printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     375"    page %x / status [%x,%x] / count %d\n",
     376__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     377kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     378#endif
     379
     380        // release the block to the relevant page
     381        kcm_put_block( kcm , kcm_page , block );
     382
     383#if DEBUG_KCM
     384if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     385printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     386"    page %x / status [%x,%x] / count %d\n",
     387__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     388kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     389#endif
    380390
    381391        // release lock
    382392        remote_busylock_release( lock_xp );
    383393
    384 #if DEBUG_KCM
    385 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
    386 {
    387 printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",
    388 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,
    389 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
    390 kcm_remote_display( local_cxy , kcm_ptr );
    391 }
    392 #endif
    393 
    394394}  // end kcm_free()
    395395
     
    400400
    401401/////////////////////////////////////////////////////////////////////////////////////
    402 // This static function can be called by any thread running in any cluster.
     402// This static function is called by the kcm_remote_alloc() function.
     403// It can be called by any thread running in any cluster.
    403404// It returns a local pointer on a block allocated from an active kcm_page.
    404405// It makes a panic if no block available in the selected kcm_page.
     
    415416{
    416417    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    417     uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    418418    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    419419    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    420     uint32_t size   = 1 << order;
    421 
    422 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );
     420
     421// check kcm_page not full
     422assert( __FUNCTION__, (count < 63) ,
     423"kcm_page should not be full / cxy %x / order %d / count %d", kcm_cxy, order, count );
    423424
    424425    uint32_t index  = 1;
     
    427428        // allocate first free block in kcm_page, update status,
    428429    // and count , compute index of allocated block in kcm_page
    429     while( index <= max )
     430    while( index <= 63 )
    430431    {
    431432        if( (status & mask) == 0 )   // block found
     
    440441    }
    441442
    442         // change the page list if found block is the last
    443         if( count == max-1 )
      443        // switch the page to the full list if this was the last free block
     444        if( (count + 1) == 63 )
    444445        {
    445446                list_remote_unlink( kcm_cxy , &kcm_page->list );
     
    451452
    452453        // compute return pointer
    453         void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
    454 
    455 #if DEBUG_KCM_REMOTE
    456 thread_t * this  = CURRENT_THREAD;
    457 uint32_t   cycle = (uint32_t)hal_get_cycles();
    458 if( DEBUG_KCM_REMOTE < cycle )
    459 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n",
    460 __FUNCTION__, this->process->pid, this->trdid,
    461 ptr, kcm_page, kcm_cxy, size, count + 1 );
    462 #endif
     454        void * ptr = (void *)((intptr_t)kcm_page + (index << order));
    463455
    464456        return ptr;
     
    467459
    468460/////////////////////////////////////////////////////////////////////////////////////
    469 // This private static function can be called by any thread running in any cluster.
     461// This static function is called by the kcm_remote_free() function.
     462// It can be called by any thread running in any cluster.
    470463// It releases a previously allocated block to the relevant kcm_page.
    471464// It changes the kcm_page status as required.
     
    481474                                                             void       * block_ptr )
    482475{
    483     uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    484476    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    485477    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    486478    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    487     uint32_t size   = 1 << order;
    488479   
    489         // compute block index from block pointer
    490         uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     480        // compute block index from block pointer and kcm_page pointer
     481        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order;
    491482
    492483    // compute mask in bit vector
     
    497488        printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
    498489        __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page );
    499         printk("   status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
    500490        kcm_remote_display( kcm_cxy , kcm_ptr );
    501491        return;
     
    506496        hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count - 1 );
    507497
    508         // change the page list if page was full
    509         if( count == max )
     498        // switch the page to active if page was full
     499        if( count == 63 )
    510500        {
    511501                list_remote_unlink( kcm_cxy , &kcm_page->list );
     
    516506        }
    517507
    518 #if (DEBUG_KCM_REMOTE & 1)
    519 thread_t * this  = CURRENT_THREAD;
    520 uint32_t   cycle = (uint32_t)hal_get_cycles();
    521 if( DEBUG_KCM_REMOTE < cycle )
    522 printk("\n[%s] thread[%x,%x] block %x / page %x / cluster %x / size %x / count %d\n",
    523 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )
    524 #endif
    525 
    526508}  // end kcm_remote_put_block()
    527509
    528510/////////////////////////////////////////////////////////////////////////////////////
    529 // This private static function can be called by any thread running in any cluster.
     511// This static function can be called by any thread running in any cluster.
    530512// It gets one non-full KCM page from the remote KCM.
    531513// It allocates a page from remote PPM to populate the freelist, and initialises
     
    545527    else                            // allocate a new page from PPM
    546528        {
    547         // get one 4 Kbytes page from remote PPM
    548         xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
    549 
     529        // get KCM order
     530        uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ));
     531
     532        // get one kcm_page from PPM
     533        xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy,
     534                                                 order + 6 - CONFIG_PPM_PAGE_ORDER );
    550535            if( page_xp == XPTR_NULL )
    551536            {
    552                     printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    553                 __FUNCTION__ , kcm_cxy );
    554 
     537
     538#if DEBUG_KCM_ERROR
     539printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     540__FUNCTION__ , kcm_cxy );
     541#endif
    555542                    return NULL;
    556543        }
     
    585572    void       * block_ptr;
    586573
    587     if( order < 6 ) order = 6;
    588 
    589 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );
    590 
    591     // get local pointer on relevant KCM allocator
     574// check kcm_cxy argument
     575assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
     576"cluster %x not active", kcm_cxy );
     577
     578// check order argument
     579assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
     580"order argument %d too large", order );
     581
     582    // smallest size is a cache line
     583    if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER;
     584
     585    // get local pointer on relevant KCM allocator (same in all clusters)
    592586    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
    593587
     
    607601        }
    608602
     603#if DEBUG_KCM
     604uint32_t cycle     = (uint32_t)hal_get_cycles();
     605uint32_t nb_full   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
     606uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
     607uint64_t status    = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
     608uint32_t count     = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
     609#endif
     610
     611
     612#if DEBUG_KCM
     613if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     614printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     615"    page %x / status [%x,%x] / count %d\n",
     616__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      617kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     618#endif
     619
    609620        // get a block from selected active page
    610621        block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page );
    611622
     623#if DEBUG_KCM
     624if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     625printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     626"    page %x / status [%x,%x] / count %d\n",
     627__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      628kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     629#endif
     630
    612631        // release lock
    613632        remote_busylock_release( lock_xp );
    614633
    615 #if DEBUG_KCM_REMOTE
    616 thread_t * this  = CURRENT_THREAD;
    617 uint32_t   cycle = (uint32_t)hal_get_cycles();
    618 if( DEBUG_KCM_REMOTE < cycle )
    619 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",
    620 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
    621 #endif
    622 
    623634        return block_ptr;
    624635
    625636}  // end kcm_remote_alloc()
    626637
    627 /////////////////////////////////////
    628 void kcm_remote_free( cxy_t  kcm_cxy,
    629                       void * block_ptr )
     638////////////////////////////////////////
     639void kcm_remote_free( cxy_t     kcm_cxy,
     640                      void    * block_ptr,
     641                      uint32_t  order )
    630642{
    631643        kcm_t      * kcm_ptr;
    632644        kcm_page_t * kcm_page;
    633645
    634 // check argument
    635 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );
    636 
    637     // get local pointer on remote KCM page
    638         kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
    639 
    640     // get local pointer on remote KCM
    641         kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) );
     646// check kcm_cxy argument
     647assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
     648"cluster %x not active", kcm_cxy );
     649
     650// check block_ptr argument
     651assert( __FUNCTION__, (block_ptr != NULL),
     652"block pointer cannot be NULL" );
     653
     654// check order argument
     655assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
     656"order argument %d too large", order );
     657
     658    // smallest block size is a cache line
     659    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
     660
     661    // get local pointer on relevant KCM allocator (same in all clusters)
     662    kcm_ptr = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
     663
     664    // get local pointer on KCM page
     665    intptr_t kcm_page_mask = (1 << (order + 6)) - 1;
     666        kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~kcm_page_mask);
     667
     668#if DEBUG_KCM
     669uint32_t cycle     = (uint32_t)hal_get_cycles();
     670uint32_t nb_full   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
     671uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
     672uint64_t status    = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
     673uint32_t count     = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
     674#endif
    642675
    643676    // build extended pointer on remote KCM lock
     
    647680        remote_busylock_acquire( lock_xp );
    648681
    649         // release block
     682#if DEBUG_KCM
     683if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     684printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     685"    page %x / status [%x,%x] / count %d\n",
     686__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      687kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     688#endif
     689
     690        // release the block to the relevant page
    650691        kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr );
     692
     693#if DEBUG_KCM
     694if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     695printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     696"    page %x / status [%x,%x] / count %d\n",
     697__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      698kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     699#endif
    651700
    652701        // release lock
    653702        remote_busylock_release( lock_xp );
    654 
    655 #if DEBUG_KCM_REMOTE
    656 thread_t * this  = CURRENT_THREAD;
    657 uint32_t   cycle = (uint32_t)hal_get_cycles();
    658 uint32_t   order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    659 if( DEBUG_KCM_REMOTE < cycle )
    660 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",
    661 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
    662 #endif
    663703
    664704}  // end kcm_remote_free
     
    673713    uint32_t       count;
    674714
     715    // get pointers on TXT0 chdev
     716    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     717    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     718    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     719
     720    // get extended pointer on remote TXT0 chdev lock
     721    xptr_t    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     722
     723    // get TXT0 lock
     724    remote_busylock_acquire( txt0_lock_xp );
     725
    675726    uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
    676727    uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
    677728    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );
    678729
    679         printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
     730        nolock_printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
    680731        kcm_cxy, order, full_pages_nr, active_pages_nr );
    681732
     
    688739            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    689740
    690             printk("- active page %x / status (%x,%x) / count %d\n",
    691             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     741            nolock_printk("- active page %x / status (%x,%x) / count %d\n",
     742            kcm_page, (uint32_t)( status<< 32 ), (uint32_t)( status ), count );
    692743        }
    693744    }
     
    701752            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    702753
    703             printk("- full page %x / status (%x,%x) / count %d\n",
    704             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     754            nolock_printk("- full page %x / status (%x,%x) / count %d\n",
      755            kcm_page, (uint32_t)( status >> 32 ), (uint32_t)( status ), count );
    705756        }
    706757    }
     758
     759    // release TXT0 lock
     760    remote_busylock_release( txt0_lock_xp );
     761
     707762}  // end kcm_remote_display()
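The new kcm_free() above recovers the kcm_page descriptor from a block pointer by masking, and the block index by a shift, because a kcm_page is now an aligned buffer of 64 blocks (buffer_order = block_order + 6). The following minimal standalone sketch illustrates that arithmetic; the example order and the block address are hypothetical values, not taken from the kernel headers.

    #include <stdint.h>
    #include <stdio.h>

    // minimal sketch of the kcm_free() address arithmetic : a kcm_page buffer
    // contains 64 blocks, so buffer_order = order + 6, and the page base is
    // obtained by masking the block address (values below are illustrative)
    int main( void )
    {
        uint32_t order = 8;                                   // hypothetical 256-byte blocks
        intptr_t block = 0x00A41300;                          // hypothetical block address
        intptr_t mask  = (1 << (order + 6)) - 1;              // kcm_page buffer size - 1

        intptr_t page  = block & ~mask;                       // aligned kcm_page base
        uint32_t index = (uint32_t)((block - page) >> order); // block slot in the kcm_page

        printf("page base %lx / block index %u\n", (unsigned long)page, index);
        return 0;
    }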
  • trunk/kernel/mm/kcm.h

    r672 r683  
    3232#include <kmem.h>
    3333
    34 
    35 #define KCM_PAGE_FULL     0
    36 #define KCM_PAGE_EMPTY    1
    37 #define KCM_PAGE_ACTIVE   2
    38 
    3934/****************************************************************************************
    40  * This structure defines a generic Kernel Cache Manager, that is a block allocator,
    41  * for fixed size objects. It exists in each cluster a specific KCM allocator for
    42  * the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes.
    43  * These six KCM allocators are initialized by the cluster_init() function.
     35 * This structure defines a generic Kernel Cache Manager, a fixed size block allocator.
     36 * It returns an aligned block whose size is a power of 2, not smaller than a cache line,
      37 * but smaller than a small PPM page. Each cluster contains a specific KCM allocator
     38 * for each possible block size. When the cache line contains 64 bytes and the page
     39 * contains 4K bytes, the possible block sizes are 64, 128, 256, 512, 1024, 2048 bytes.
     40 * These KCM allocators are initialized by the cluster_init() function.
    4441 *
    45  * Each KCM cache is implemented as a set o 4 Kbytes pages. A kcm_page is split in slots,
    46  * where each slot can contain one block. in each kcm_page, the first slot (that cannot
    47  * be smaller than 64 bytes) contains the kcm page descriptor, defined below
     42 * Each KCM cache is implemented as a set of "kcm_pages": a "kcm_page" is an aligned
      43 * buffer in physical memory (allocated by the PPM allocator) such that:
     44 *       buffer_size = block_size * 64  <=>  buffer_order = block_order + 6.
     45 *
      46 * A kcm_page always contains 64 kcm_blocks, but the first block (which cannot be smaller
     47 * than 64 bytes) is used to store the kcm_page descriptor defining the page allocation
     48 * status, and cannot be allocated to store data.
     49 *
     50 * A KCM cache is extensible, as new kcm_pages are dynamically allocated from the PPM
     51 * allocator when required. For a given KCM cache the set of kcm_pages is split in two
     52 * lists: the list of "full" pages (containing 63 allocated blocks), and the list of
     53 * "active" pages (containing at least one free block). An "empty" page (containing
     54 * only free blocks) is considered active, and is not released to PPM.
    4855 *
    4956 * To allow any thread running in any cluster to directly access the KCM of any cluster,
     
    6269
    6370        uint32_t             order;            /*! ln( block_size )                        */
    64         uint32_t             max_blocks;       /*! max number of blocks per page           */
    6571}
    6672kcm_t;
     
    8490        list_entry_t        list;              /*! [active / busy / free] list member      */
    8591        kcm_t             * kcm;               /*! pointer on kcm allocator                */
    86         page_t            * page;              /*! pointer on the physical page descriptor */
     92        page_t            * page;              /*! pointer on physical page descriptor    */
    8793}
    8894kcm_page_t;
     
    120126 ****************************************************************************************
    121127 * @ block_ptr   : local pointer on the released block.
     128 * @ order       : log2( block_size in bytes ).
    122129 ***************************************************************************************/
    123 void kcm_free( void    * block_ptr );
     130void kcm_free( void    * block_ptr,
     131               uint32_t  order );
    124132
    125133
     
    143151 * @ kcm_cxy     : remote KCM cluster identifier.
    144152 * @ block_ptr   : local pointer on the released buffer in remote cluster.
     153 * @ order       : log2( block_size in bytes ).
    145154 ***************************************************************************************/
    146155void kcm_remote_free( cxy_t     kcm_cxy,
    147                       void    * block_ptr );
     156                      void    * block_ptr,
     157                      uint32_t  order );
    148158
    149159/****************************************************************************************
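As described in the kcm.h comment above, the 64-bit status field is an allocation bit vector: bit 0 is always set because block 0 holds the kcm_page descriptor, so at most 63 blocks can be allocated and a page becomes full when count reaches 63. A small self-contained sketch of that convention follows; the helper names are illustrative, not part of the kernel API.

    #include <stdint.h>
    #include <stdbool.h>

    // illustrative helpers mirroring the status bit-vector convention :
    // bit i set <=> block i allocated / bit 0 always set (page descriptor)
    static bool kcm_page_is_full( uint64_t status )
    {
        return (status == UINT64_MAX);                // descriptor bit + 63 allocated blocks
    }

    static int kcm_find_free_block( uint64_t status )
    {
        for( int index = 1 ; index < 64 ; index++ )   // block 0 is never allocatable
        {
            if( (status & (1ULL << index)) == 0 ) return index;
        }
        return -1;                                    // page is full
    }

    int main( void )
    {
        uint64_t status = 0x1;                             // empty page : only the descriptor bit
        int      index  = kcm_find_free_block( status );   // returns 1
        status |= (1ULL << index);                         // mark block 1 as allocated
        return kcm_page_is_full( status ) ? 1 : 0;         // still not full
    }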
  • trunk/kernel/mm/khm.c

    r672 r683  
    4040{
    4141        // check config parameters
    42         assert( __FUNCTION__, ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
     42        assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ORDER + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
    4343                 "CONFIG_PPM_HEAP_ORDER too large" );
    4444
     
    4747
    4848        // compute kernel heap size
    49         intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;
     49        intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ORDER;
    5050
    5151        // get kernel heap base from PPM
  • trunk/kernel/mm/kmem.c

    r672 r683  
    22 * kmem.c - kernel memory allocator implementation.
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019,2020)
     4 * Authors  Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2929#include <thread.h>
    3030#include <memcpy.h>
    31 #include <khm.h>
    3231#include <ppm.h>
    3332#include <kcm.h>
     
    3534#include <kmem.h>
    3635
    37 /////////////////////////////////////
    38 void * kmem_alloc( kmem_req_t * req )
    39 {
    40         uint32_t    type;    // KMEM_PPM / KMEM_KCM / KMEM_KHM
    41         uint32_t    flags;   // AF_NONE / AF_ZERO / AF_KERNEL
    42         uint32_t    order;   // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes
    43 
    44         type  = req->type;
    45         order = req->order;
    46         flags = req->flags;
    47 
    48     //////////////////////
    49         if( type == KMEM_PPM )
    50         {
    51                 // allocate the number of requested pages
    52                 page_t * page_ptr = (void *)ppm_alloc_pages( order );
    53 
    54                 if( page_ptr == NULL )
    55                 {
    56                         printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n",
    57                         __FUNCTION__ , order , local_cxy );
    58                         return NULL;
    59                 }
    60 
    61         xptr_t page_xp = XPTR( local_cxy , page_ptr );
    62 
    63                 // reset page if requested
    64                 if( flags & AF_ZERO ) page_zero( page_ptr );
    65 
    66         // get pointer on buffer from the page descriptor
    67         void * ptr = GET_PTR( ppm_page2base( page_xp ) );
    68 
    69 #if DEBUG_KMEM
     36///////////////////////////////////
     37void * kmem_alloc( uint32_t  order,
     38                   uint32_t  flags )
     39{
     40
     41#if DEBUG_KMEM || DEBUG_KMEM_ERROR
    7042thread_t * this  = CURRENT_THREAD;
    7143uint32_t   cycle = (uint32_t)hal_get_cycles();
    72 if( DEBUG_KMEM < cycle )
    73 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    74 __FUNCTION__, this->process->pid, this->trdid,
    75 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle );
     44#endif
     45
     46        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     47        {
     48                // allocate memory from PPM
     49                page_t * page = (void *)ppm_alloc_pages( order - CONFIG_PPM_PAGE_ORDER );
     50
     51                if( page == NULL )
     52                {
     53
     54#if DEBUG_KMEM_ERROR
     55if (DEBUG_KMEM_ERROR < cycle)
     56printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n",
     57__FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle );
     58#endif
     59                        return NULL;
     60                }
     61
     62                // reset page if requested
     63                if( flags & AF_ZERO ) page_zero( page );
     64
     65        // get pointer on buffer from the page descriptor
     66        xptr_t page_xp = XPTR( local_cxy , page );
     67        void * ptr     = GET_PTR( ppm_page2base( page_xp ) );
     68
     69#if DEBUG_KMEM
     70if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     71printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n",
     72__FUNCTION__, this->process->pid, this->trdid,
     73order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle );
    7674#endif
    7775        return ptr;
    7876        }
    79     ///////////////////////////
    80         else if( type == KMEM_KCM )
     77        else                                     // use KCM
    8178        {
    8279                // allocate memory from KCM
     
    8582                if( ptr == NULL )
    8683                {
    87                         printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n",
    88                     __FUNCTION__ , order , local_cxy );
     84
     85#if DEBUG_KMEM_ERROR
     86if (DEBUG_KMEM_ERROR < cycle)
     87printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n",
     88__FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle );
     89#endif
    8990                        return NULL;
    9091                }
     
    9495
    9596#if DEBUG_KMEM
    96 thread_t * this  = CURRENT_THREAD;
    97 uint32_t   cycle = (uint32_t)hal_get_cycles();
    98 if( DEBUG_KMEM < cycle )
    99 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n",
    100 __FUNCTION__, this->process->pid, this->trdid,
    101 1<<order, ptr, local_cxy, cycle );
     97if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     98printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n",
     99__FUNCTION__, this->process->pid, this->trdid,
     100order, ptr, local_cxy, cycle );
    102101#endif
    103102        return ptr;
    104103        }
    105     ///////////////////////////
    106         else if( type == KMEM_KHM )
    107         {
    108                 // allocate memory from KHM
    109                 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order );
    110 
    111                 if( ptr == NULL )
    112                 {
    113                         printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n",
    114                         __FUNCTION__ , order , local_cxy );
    115                         return NULL;
    116                 }
    117 
    118                 // reset memory if requested
    119                 if( flags & AF_ZERO ) memset( ptr , 0 , order );
    120 
    121 #if DEBUG_KMEM
    122 thread_t * this  = CURRENT_THREAD;
    123 uint32_t   cycle = (uint32_t)hal_get_cycles();
    124 if( DEBUG_KMEM < cycle )
    125 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n",
    126 __FUNCTION__, this->process->pid, this->trdid,
    127 order, ptr, local_cxy, cycle );
    128 #endif
    129         return ptr;
    130         }
    131     else
    132     {
    133         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    134         return NULL;
    135     }
    136104}  // end kmem_alloc()
    137105
    138 //////////////////////////////////
    139 void kmem_free( kmem_req_t * req )
    140 {
    141     uint32_t type = req->type;
    142 
    143     //////////////////////
    144         if( type == KMEM_PPM )
    145         {
    146         page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) );
     106//////////////////////////////
     107void kmem_free( void    * ptr,
     108                uint32_t  order )
     109{
     110        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     111        {
     112        page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , ptr ) ) );
    147113
    148114        ppm_free_pages( page );
    149115    }
    150     ///////////////////////////
    151     else if( type == KMEM_KCM )
     116        else                                     // use KCM
    152117    {
    153         kcm_free( req->ptr );
    154         }
    155     ///////////////////////////
    156     else if( type == KMEM_KHM )
    157     {
    158         khm_free( req->ptr );
    159     }
    160     else
    161     {
    162         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    163     }
     118        kcm_free( ptr , order );
     119        }
    164120}  // end kmem_free()
    165121
    166 ///////////////////////////////////////////
    167 void * kmem_remote_alloc( cxy_t        cxy,
    168                           kmem_req_t * req )
    169 {
    170         uint32_t    type;    // KMEM_PPM / KMEM_KCM / KMEM_KHM
    171         uint32_t    flags;   // AF_ZERO / AF_KERNEL / AF_NONE
    172         uint32_t    order;   // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes
    173 
    174         type  = req->type;
    175         order = req->order;
    176         flags = req->flags;
    177 
    178         //////////////////////
    179         if( type == KMEM_PPM )
    180         {
    181                 // allocate the number of requested pages from remote cluster
    182                 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );
     122
     123
     124////////////////////////////////////////
     125void * kmem_remote_alloc( cxy_t     cxy,
     126                          uint32_t  order,
     127                          uint32_t  flags )
     128{
     129
     130#if DEBUG_KMEM || DEBUG_KMEM_ERROR
     131thread_t * this = CURRENT_THREAD;
     132uint32_t   cycle = (uint32_t)hal_get_cycles();
     133#endif
     134
     135        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     136        {
     137                // allocate memory from PPM
     138                xptr_t page_xp = ppm_remote_alloc_pages( cxy , order - CONFIG_PPM_PAGE_ORDER );
    183139
    184140                if( page_xp == XPTR_NULL )
    185141                {
    186                         printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n",
    187                         __FUNCTION__ , order , cxy );
     142
     143#if DEBUG_KMEM_ERROR
     144if( DEBUG_KMEM_ERROR < cycle )
     145printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n",
     146__FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
     147#endif
    188148                        return NULL;
    189149                }
     
    192152        xptr_t base_xp = ppm_page2base( page_xp );
    193153
    194                 // reset page if requested
    195                 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
    196 
    197 
    198 #if DEBUG_KMEM_REMOTE
    199 thread_t * this = CURRENT_THREAD;
    200 uint32_t   cycle = (uint32_t)hal_get_cycles();
    201 if( DEBUG_KMEM_REMOTE < cycle )
    202 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    203 __FUNCTION__, this->process->pid, this->trdid,
    204 1<<order, ppm_page2ppn( page_xp ), cxy, cycle );
     154                // reset memory if requested
     155                if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , 1<<order );
     156
     157#if DEBUG_KMEM
     158if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     159printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n",
     160__FUNCTION__, this->process->pid, this->trdid,
     161order, ppm_page2ppn( page_xp ), cxy, cycle );
    205162#endif
    206163        return GET_PTR( base_xp );
    207164        }
    208     ///////////////////////////
    209         else if( type == KMEM_KCM )
     165        else                                     // use KCM
    210166        {
    211167                // allocate memory from KCM
     
    214170                if( ptr == NULL )
    215171                {
    216                         printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n",
    217                     __FUNCTION__ , order , cxy );
     172
     173#if DEBUG_KMEM_ERROR
     174if( DEBUG_KMEM_ERROR < cycle )
     175printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n",
     176__FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
     177#endif
    218178                        return NULL;
    219179                }
     
    222182                if( flags & AF_ZERO )  hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order );
    223183
    224 #if DEBUG_KMEM_REMOTE
    225 thread_t * this = CURRENT_THREAD;
    226 uint32_t   cycle = (uint32_t)hal_get_cycles();
    227 if( DEBUG_KMEM_REMOTE < cycle )
    228 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n",
    229 __FUNCTION__, this->process->pid, this->trdid,
    230 1<<order, ptr, cxy, cycle );
     184#if DEBUG_KMEM
     185if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     186printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n",
     187__FUNCTION__, this->process->pid, this->trdid,
     188order, ptr, cxy, cycle );
    231189#endif
    232190        return ptr;
    233191        }
    234         ///////////////////////////
    235         else if( type == KMEM_KHM )               
    236         {
    237         printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__  );
    238                 return NULL;
    239         }
    240     else
    241     {
    242         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    243         return NULL;
    244     }
     245192}  // end kmem_remote_alloc()
    246193
    247 ////////////////////////////////////////
    248 void kmem_remote_free( cxy_t        cxy,
    249                        kmem_req_t * req )
    250 {
    251     uint32_t type = req->type;
    252 
    253     //////////////////////
    254         if( type == KMEM_PPM )
    255         {
    256         page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) );
     194/////////////////////////////////////
     195void kmem_remote_free( cxy_t     cxy,
     196                       void    * ptr,
     197                       uint32_t  order )
     198{
     199        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     200        {
     201        page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , ptr ) ) );
    257202
    258203        ppm_remote_free_pages( cxy , page );
    259204    }
    260     ///////////////////////////
    261     else if( type == KMEM_KCM )
     205        else                                     // use KCM
    262206    {
    263         kcm_remote_free( cxy , req->ptr );
    264         }
    265     ///////////////////////////
    266     else if( type == KMEM_KHM )
    267     {
    268         printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ );
    269     }
    270     else
    271     {
    272         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    273     }
     207        kcm_remote_free( cxy , ptr , order );
     208        }
    274209}  // end kmem_remote_free()
    275210
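With the kmem_req_t descriptor removed, kmem_alloc() and kmem_free() now select the allocator from the <order> argument alone: an order of at least CONFIG_PPM_PAGE_ORDER is served by the PPM buddy allocator, a smaller order by the KCM cache of that size. The standalone sketch below mimics this dispatch rule for a few requested sizes, assuming 4 Kbytes pages (CONFIG_PPM_PAGE_ORDER = 12); it is illustrative only and does not call the kernel functions.

    #include <stdint.h>
    #include <stdio.h>

    #define PPM_PAGE_ORDER  12      // assumption : 4 Kbytes pages

    // smallest order such that (1 << order) >= size
    static uint32_t size_to_order( uint32_t size )
    {
        uint32_t order = 0;
        while( (1U << order) < size ) order++;
        return order;
    }

    int main( void )
    {
        uint32_t sizes[] = { 24, 200, 4096, 20000 };

        for( unsigned i = 0 ; i < sizeof(sizes)/sizeof(sizes[0]) ; i++ )
        {
            uint32_t order = size_to_order( sizes[i] );
            printf("size %5u -> order %2u -> %s\n", sizes[i], order,
                   (order >= PPM_PAGE_ORDER) ? "PPM (buddy pages)" : "KCM (block cache)");
        }
        return 0;
    }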
  • trunk/kernel/mm/kmem.h

    r656 r683  
    11/*
    2  * kmem.h - kernel unified memory allocator interface
     2 * kmem.h - unified kernel memory allocator definition
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2929
    3030/*************************************************************************************
    31  * This enum defines the three Kernel Memory Allocaror types
    32  ************************************************************************************/
    33 
    34 enum
    35 {
    36     KMEM_PPM              = 0,   /*! PPM allocator                                  */
    37     KMEM_KCM              = 1,   /*! KCM allocator                                  */
    38     KMEM_KHM              = 2,   /*! KHM allocator                                  */
    39 };
    40 
    41 /*************************************************************************************
    4231 * This defines the generic Allocation Flags that can be associated to
    4332 * a Kernel Memory Request.
     
    4534
    4635#define AF_NONE       0x0000   // no attributes
    47 #define AF_KERNEL     0x0001   // for kernel use
    48 #define AF_ZERO       0x0002   // must be reset to 0
    49 
    50 /*************************************************************************************
    51  * This structure defines a Kernel Memory Request.
    52  ************************************************************************************/
    53 
    54 typedef struct kmem_req_s
    55 {
    56     uint32_t      type;   /*! KMEM_PPM / KMEM_KCM / KMEM_KHM                        */
    57     uint32_t      order;  /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes        */
    58     uint32_t      flags;  /*! request attributes                                    */
    59     void        * ptr;    /*! local pointer on allocated buffer (only used by free) */
    60 }
    61 kmem_req_t;
     36#define AF_KERNEL     0x0001   // for kernel use ???
     37#define AF_ZERO       0x0002   // data buffer must be reset to 0
    6238
    6339/*************************************************************************************
    6440 * These two functions allocate physical memory in a local or remote cluster
    65  * as specified by the kmem_req_t request descriptor, and return a local pointer
    66  * on the allocated buffer. It uses three specialised physical memory allocators:
    67  * - PPM (Physical Pages Manager) allocates N contiguous small physical pages.
    68  *       N is a power of 2, and req.order = ln(N). Implement the buddy algorithm.
    69  * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache.
    70  *       M is a power of 2, and req.order = ln( M ). One cache per block size.
    71  * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes,
    72  *       M can have any value, and req.order = M.
    73  *
    74  * WARNING: the physical memory allocated with a given allocator type must be
    75  *          released using the same allocator type.
     41 * as specified by the <cxy>, <order> and <flags> arguments, and return a local
     42 * pointer on the allocated buffer. The buffer size (in bytes) is a power of 2,
     43 * equal to (1 << order) bytes. It can be initialized to zero if requested.
     44 * Depending on the <order> value, it uses two specialised allocators:
     45 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages
     46 *   Manager) allocates 2**(order - PPM_PAGE_ORDER) contiguous small physical pages.
     47 *   This allocator implements the buddy algorithm.
     48 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager)
     49 *   allocates an aligned block of 2**order bytes from specialised KCM[ORDER] caches
     50 *  (one KCM cache per block size). 
    7651 *************************************************************************************
    77  * @ cxy   : target cluster identifier for a remote access.
    78  * @ req   : local pointer on allocation request.
      52 * @ cxy    : [in] target cluster identifier for a remote access.
      53 * @ order  : [in] ln( block size in bytes ).
     54 * @ flags  : [in] allocation flags defined above.
    7955 * @ return local pointer on allocated buffer if success / return NULL if no memory.
    8056 ************************************************************************************/
    81 void * kmem_alloc( kmem_req_t * req );
     57void * kmem_alloc( uint32_t  order,
     58                   uint32_t  flags );
    8259
    83 void * kmem_remote_alloc( cxy_t        cxy,
    84                           kmem_req_t * req );
     60void * kmem_remote_alloc( cxy_t     cxy,
     61                          uint32_t  order,
     62                          uint32_t  flags );
    8563
    8664/*************************************************************************************
    87  * These two functions release previously allocated physical memory, as specified
    88  * by the <type> and <ptr> fields of the kmem_req_t request descriptor.
     65 * These two functions release a previously allocated physical memory block,
     66 * as specified by the <cxy>, <order> and <ptr> arguments.
     67 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages
     68 *   Manager) releases 2**(order - PPM_PAGE_ORDER) contiguous small physical pages.
     69 *   This allocator implements the buddy algorithm.
     70 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager)
      71 *   releases the block of 2**order bytes to the specialised KCM[order] cache.
    8972 *************************************************************************************
    90  * @ cxy   : target cluster identifier for a remote access.
    91  * @ req : local pointer to request descriptor.
     73 * @ cxy    : [in] target cluster identifier for a remote access.
     74 * @ ptr    : [in] local pointer to released block.
     75 * @ order  : [in] ln( block size in bytes ).
    9276 ************************************************************************************/
    93 void  kmem_free ( kmem_req_t * req );
     77void  kmem_free( void    * ptr,
     78                 uint32_t  order );
    9479
    95 void  kmem_remote_free( cxy_t        cxy,
    96                         kmem_req_t * req );
     80void  kmem_remote_free( cxy_t     cxy,
     81                        void    * ptr,
     82                        uint32_t  order );
    9783
    9884
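A consequence of the interface documented above is that the caller must keep the order used at allocation time and pass the same order when releasing the block. A hypothetical kernel-side call site, following the pattern used by mapper_create() in this changeset (my_struct_t is an invented type used only for illustration):

    #include <kmem.h>

    // hypothetical caller : allocate a zeroed object, then release it,
    // passing the same order to kmem_free() as was given to kmem_alloc()
    void example( void )
    {
        uint32_t      order = bits_log2( sizeof(my_struct_t) );

        my_struct_t * ptr   = kmem_alloc( order , AF_ZERO );

        if( ptr != NULL ) kmem_free( ptr , order );
    }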
  • trunk/kernel/mm/mapper.c

    r672 r683  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018,2019,2020)
     5 *           Alain Greiner          (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    5151{
    5252    mapper_t * mapper_ptr;
    53     kmem_req_t req;
    5453    error_t    error;
    5554
    5655    // allocate memory for mapper descriptor
    57     req.type    = KMEM_KCM;
    58     req.order   = bits_log2( sizeof(mapper_t) );
    59     req.flags   = AF_KERNEL | AF_ZERO;
    60     mapper_ptr  = kmem_remote_alloc( cxy , &req );
     56    mapper_ptr  = kmem_remote_alloc( cxy , bits_log2(sizeof(mapper_t)) , AF_ZERO );
    6157
    6258    if( mapper_ptr == NULL )
    6359    {
    64         printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
     60
     61#if DEBUG_MAPPER_ERROR
     62printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
     63#endif
    6564        return XPTR_NULL;
    6665    }
     
    7776    if( error )
    7877    {
    79         printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
    80         req.type  = KMEM_KCM;
    81         req.ptr   = mapper_ptr;
    82         kmem_remote_free( cxy , &req );
     78
      79#if DEBUG_MAPPER_ERROR
      80printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
      81#endif
      82        kmem_remote_free( cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
    8383        return XPTR_NULL;
    8484    }
     
    104104    uint32_t   found_index = 0;
    105105    uint32_t   start_index = 0;
    106     kmem_req_t req;
    107106
    108107    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     
    137136
    138137    // release memory for mapper descriptor
    139     req.type = KMEM_KCM;
    140     req.ptr  = mapper_ptr;
    141     kmem_remote_free( mapper_cxy , &req );
     138    kmem_remote_free( mapper_cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
    142139
    143140}  // end mapper_destroy()
     
    153150    uint32_t   inode_type = 0;
    154151
    155     thread_t * this = CURRENT_THREAD;
     152#if DEBUG_MAPPER_HANDLE_MISS || DEBUG_MAPPER_ERROR
     153thread_t * this  = CURRENT_THREAD;
     154uint32_t   cycle = (uint32_t)hal_get_cycles();
     155#endif
    156156
    157157    // get target mapper cluster and local pointer
     
    170170
    171171#if DEBUG_MAPPER_HANDLE_MISS
    172 uint32_t      cycle = (uint32_t)hal_get_cycles();
    173172char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    174173if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
     
    185184#endif
    186185
    187 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     186#if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    188187if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    189188{
     
    193192#endif
    194193
    195     // allocate one 4 Kbytes page from the remote mapper cluster
    196     xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
     194    // allocate one 4 Kbytes page in the remote mapper cluster
     195    void * base_ptr = kmem_remote_alloc( mapper_cxy , 12 , AF_NONE );
     196
     197    if( base_ptr == NULL )
     198    {
     199
     200#if DEBUG_MAPPER_ERROR
     201printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x / cycle %d\n",
     202__FUNCTION__ , this->process->pid, this->trdid , mapper_cxy , cycle );
     203#endif
     204        return -1;
     205    }
     206
      207    // get pointers on the allocated page descriptor
     208    xptr_t   page_xp  = ppm_base2page( XPTR( mapper_cxy , base_ptr ) );
    197209    page_t * page_ptr = GET_PTR( page_xp );
    198                            
    199     if( page_xp == XPTR_NULL )
    200     {
    201         printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
    202         __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
    203         return -1;
    204     }
    205210
    206211    // initialize the page descriptor
     
    217222                                 page_id,
    218223                                 page_ptr );
    219 
    220224    if( error )
    221225    {
    222         printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
    223         __FUNCTION__ , this->process->pid, this->trdid );
    224         ppm_remote_free_pages( mapper_cxy , page_ptr );
     226
      227#if DEBUG_MAPPER_ERROR
      228printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper / cycle %d\n",
      229__FUNCTION__ , this->process->pid, this->trdid , cycle );
      230#endif
      231        ppm_remote_free_pages( mapper_cxy , page_ptr );
    225232        return -1;
    226233    }
     
    236243        if( error )
    237244        {
    238             printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
    239             __FUNCTION__ , this->process->pid, this->trdid );
    240             mapper_remote_release_page( mapper_xp , page_ptr );
     245
     246#if DEBUG_MAPPER_ERROR
     247printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device / cycle %d\n",
     248__FUNCTION__ , this->process->pid, this->trdid , cycle );
     249mapper_remote_release_page( mapper_xp , page_ptr );
     250#endif
    241251            return -1;
    242252         }
     
    260270#endif
    261271
    262 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     272#if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    263273if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264274{
     
    299309#endif
    300310
    301 #if( DEBUG_MAPPER_GET_PAGE & 2 )
     311#if( DEBUG_MAPPER_GET_PAGE & 1 )
    302312if( DEBUG_MAPPER_GET_PAGE < cycle )
    303313ppm_remote_display( local_cxy );
     
    336346            if( error )
    337347            {
    338                 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
    339                 __FUNCTION__ , this->process->pid, this->trdid );
    340                 remote_rwlock_wr_release( lock_xp );
     348
      349#if DEBUG_MAPPER_ERROR
      350printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
      351__FUNCTION__ , this->process->pid, this->trdid );
      352#endif
      353                remote_rwlock_wr_release( lock_xp );
    341354                return XPTR_NULL;
    342355            }
     
    364377#endif
    365378
    366 #if( DEBUG_MAPPER_GET_PAGE & 2)
     379#if( DEBUG_MAPPER_GET_PAGE & 1)
    367380if( DEBUG_MAPPER_GET_PAGE < cycle )
    368381ppm_remote_display( local_cxy );
     
    432445            if( error )
    433446            {
    434                 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
    435                 __FUNCTION__ , this->process->pid, this->trdid );
    436                 remote_rwlock_wr_release( lock_xp );
     447
      448#if DEBUG_MAPPER_ERROR
      449printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
      450__FUNCTION__ , this->process->pid, this->trdid );
      451#endif
      452                remote_rwlock_wr_release( lock_xp );
    437453                return XPTR_NULL;
    438454            }
     
    460476#endif
    461477
    462 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
     478#if( DEBUG_MAPPER_GET_FAT_PAGE & 1)
    463479if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
    464480ppm_remote_display( local_cxy );
     
    532548
    533549    // compute indexes of pages for first and last byte in mapper
    534     uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    535     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
     550    uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
     551    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_ORDER;
    536552
    537553#if (DEBUG_MAPPER_MOVE_USER & 1)
     
    668684
    669685    // compute indexes for first and last pages in mapper
    670     uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    671     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
     686    uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
     687    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_ORDER;
    672688
    673689    // compute source and destination clusters
     
    853869            if( error )
    854870            {
    855                 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n",
    856                 __FUNCTION__, page_ptr->index );
     871
     872#if DEBUG_MAPPER_SYNC
      873printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
     874__FUNCTION__, page_ptr->index );
     875#endif
    857876                return -1;
    858877            }
  • trunk/kernel/mm/mapper.h

    r657 r683  
    3939/*******************************************************************************************
    4040 * This mapper_t object implements the kernel cache for a given VFS file or directory.
    41  * There is one mapper per file/dir. It is implemented as a three levels radix tree,
    42  * entirely stored in the same cluster as the inode representing the file/dir.
     41 * There is one mapper per file/dir.
     42 * - It is implemented as a three levels radix tree, entirely stored in the same cluster
     43 *   as the inode representing the file/directory.
    4344 * - The fast retrieval key is the page index in the file.
    4445 *   The ix1_width, ix2_width, ix3_width sub-indexes are configuration parameters.
    4546 * - The leaves are pointers on physical page descriptors, dynamically allocated
    46  *   in the local cluster.
     47 *   in the same cluster as the radix tree.
    4748 * - The mapper is protected by a "remote_rwlock", to support several simultaneous
    4849 *   "readers", and only one "writer".
     
    6061 *   buffer, that can be physically located in any cluster.
    6162 * - In the present implementation the cache size for a given file increases on demand,
    62  *   and the  allocated memory is only released when the mapper/inode is destroyed.
     63 *   and the  allocated memory is only released when the inode is destroyed.
     64 *
     65 * WARNING : This mapper implementation makes the assumption that the PPM page size
     66 *           is 4 Kbytes. This code should be modified to support a generic page size,
     67 *           defined by the CONFIG_PPM_PAGE_SIZE parameter.
    6368 ******************************************************************************************/
    6469
  • trunk/kernel/mm/page.h

    r656 r683  
    33 *
    44 * Authors Ghassan Almalles (2008,2009,2010,2011,2012)
    5  *         Alain Greiner    (2016,2017,2018,2019)
     5 *         Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/ppm.c

    r672 r683  
    6060
    6161   void   * base_ptr = ppm->vaddr_base +
    62                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
     62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER);
    6363
    6464        return XPTR( page_cxy , base_ptr );
     
    7575
    7676        page_t * page_ptr = ppm->pages_tbl +
    77                         ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
     77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ORDER);
    7878
    7979        return XPTR( base_cxy , page_ptr );
     
    9191    page_t * page_ptr = GET_PTR( page_xp );
    9292
    93     paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
    94 
    95     return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
     93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER );
     94
     95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);
    9696
    9797}  // end hal_page2ppn()
     
    102102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
    103103
    104     paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
     104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;
    105105
    106106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    107107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
    108108
    109     return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
     109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ORDER] );
    110110
    111111}  // end hal_ppn2page
     
    118118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
    119119   
    120     paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
     120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;
    121121
    122122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
     
    137137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
    138138
    139     return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
     139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);
    140140
    141141}  // end ppm_base2ppn()
     
    159159
    160160assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) ,
    161 "page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
     161"page already released : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
    162162
    163163assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) ,
    164 "reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
     164"reserved page : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
    165165
    166166        // set FREE flag in released page descriptor
     
    214214        page_t   * found_block; 
    215215
    216     thread_t * this = CURRENT_THREAD;
    217 
    218216        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    219217
    220 #if DEBUG_PPM_ALLOC_PAGES
    221 uint32_t cycle = (uint32_t)hal_get_cycles();
     218#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
     219thread_t * this  = CURRENT_THREAD;
     220uint32_t   cycle = (uint32_t)hal_get_cycles();
    222221#endif
    223222
     
    232231
    233232// check order
    234 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     233assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
     234"illegal order argument = %d" , order );
    235235
    236236    //build extended pointer on lock protecting remote PPM
     
    273273        if( current_block == NULL ) // return failure if no free block found
    274274        {
    275                 // release lock protecting free lists
     275
     276#if DEBUG_PPM_ERROR
      277printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
     278__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
     279#endif
     280        // release lock protecting free lists
    276281                remote_busylock_release( lock_xp );
    277 
    278         printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
    279         __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
    280 
    281282                return NULL;
    282283        }
     
    385386    page_t   * found_block;
    386387
    387     thread_t * this  = CURRENT_THREAD;
    388 
    389388// check order
    390 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     389assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
     390"illegal order argument = %d" , order );
    391391
    392392    // get local pointer on PPM (same in all clusters)
    393393        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    394394
    395 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
     395#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
     396thread_t * this  = CURRENT_THREAD;
    396397uint32_t   cycle = (uint32_t)hal_get_cycles();
    397398#endif
    398399
    399 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    400 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     400#if DEBUG_PPM_ALLOC_PAGES
     401if( DEBUG_PPM_ALLOC_PAGES < cycle )
    401402{
    402403    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    403404    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    404     if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     405    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
    405406}
    406407#endif
     
    445446        if( current_block == NULL ) // return failure
    446447        {
     448
     449#if DEBUG_PPM_ERROR
     450 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
     451__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
     452#endif
    447453                // release lock protecting free lists
    448454                remote_busylock_release( lock_xp );
    449 
    450         printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
    451         __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
    452 
    453455                return XPTR_NULL;
    454456        }
     
    489491    hal_fence();
    490492
    491 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    492 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     493#if DEBUG_PPM_ALLOC_PAGES
     494if( DEBUG_PPM_ALLOC_PAGES < cycle )
    493495{
    494496    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
    495497    __FUNCTION__, this->process->pid, this->trdid,
    496498    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
    497     if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     499    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
    498500}
    499501#endif
     
    521523    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
    522524
    523 #if DEBUG_PPM_REMOTE_FREE_PAGES
     525#if DEBUG_PPM_FREE_PAGES
    524526thread_t * this  = CURRENT_THREAD;
    525527uint32_t   cycle = (uint32_t)hal_get_cycles();
     
    527529#endif
    528530
    529 #if DEBUG_PPM_REMOTE_FREE_PAGES
    530 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
     531#if DEBUG_PPM_FREE_PAGES
     532if( DEBUG_PPM_FREE_PAGES < cycle )
    531533{
    532534    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    533535    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    534     if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
     536    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    535537}
    536538#endif
     
    549551
    550552assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) ,
    551 "page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
     553"page already released : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
    552554
    553555assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) ,
    554 "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
     556"reserved page : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
    555557
    556558        // set the FREE flag in released page descriptor
     
    607609    hal_fence();
    608610
    609 #if DEBUG_PPM_REMOTE_FREE_PAGES
    610 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
     611#if DEBUG_PPM_FREE_PAGES
     612if( DEBUG_PPM_FREE_PAGES < cycle )
    611613{
    612614    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    613615    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    614     if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
     616    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    615617}
    616618#endif
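The hunks above are a mechanical rename of CONFIG_PPM_PAGE_SHIFT into CONFIG_PPM_PAGE_ORDER: the constant is still the number of bits used to shift between a physical address and a physical page number (PPN). A minimal sketch of that arithmetic, assuming 4-Kbyte pages (i.e. a shift value of 12):

    // sketch only : paddr <-> ppn conversion after the rename
    paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;    // ppn   -> physical address
    ppn_t   back  = (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);    // paddr -> ppn (back == ppn)
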
  • trunk/kernel/mm/ppm.h

    r656 r683  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018,2019)
     5 *          Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    5757 * the "buddy" algorithm.
    5858 * The local threads can access these free_lists by calling the ppm_alloc_pages() and
    59  * ppm_free_page() functions, but the remote threads can access the same free lists,
     59 * ppm_free_page() functions, and the remote threads can access the same free lists,
    6060 * by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages functions.
    6161 * Therefore, these free lists are protected by a remote_busy_lock.
     
    9898 * physical pages. It takes the lock protecting the free_lists before register the
    9999 * released page in the relevant free_list.
    100  * In normal use, you do not need to call it directly, as the recommended way to free
     100 * In normal use, it should not be called directly, as the recommended way to free
    101101 * physical pages is to call the generic allocator defined in kmem.h.
    102102 *****************************************************************************************
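As the header comment above says, kernel code is expected to reach these free lists through the generic kmem allocator rather than by calling ppm_alloc_pages() / ppm_free_pages() directly. A minimal sketch of that recommended path, using the kmem signatures that appear elsewhere in this changeset (kmem_remote_alloc( cxy , order , flags ) and kmem_remote_free( cxy , base , order )):

    // sketch only : allocate one zeroed physical page in cluster page_cxy,
    // get its page descriptor, then release it through the same generic allocator
    void * base = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );
    if( base != NULL )
    {
        xptr_t page_xp = ppm_base2page( XPTR( page_cxy , base ) );   // extended pointer on page descriptor
        /* ... use the page ... */
        kmem_remote_free( page_cxy , base , CONFIG_PPM_PAGE_ORDER );
    }
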
  • trunk/kernel/mm/vmm.c

    r672 r683  
    11/*
    2  * vmm.c - virtual memory manager related operations definition.
     2 * vmm.c - virtual memory manager related operations implementation.
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
     
    8989
    9090// check ltid argument
    91 assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     91assert( __FUNCTION__,
     92(ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
    9293"slot index %d too large for an user stack vseg", ltid );
    9394
     
    107108    if( vseg == NULL )
    108109        {
    109         // release lock protecting free lists
     110 
     111#if DEBUG_VMM_ERROR
     112printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     113__FUNCTION__ , local_cxy );
     114#endif
    110115        busylock_release( &mgr->lock );
    111 
    112         printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
    113         __FUNCTION__ , local_cxy );
    114 
    115116        return NULL;
    116117    }
     
    346347    if( current_vseg == NULL )  // return failure
    347348    {
    348         // release lock protecting free lists
     349
     350#if DEBUG_VMM_ERROR
      351printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
     352__FUNCTION__, npages , local_cxy );
     353#endif
    349354        busylock_release( &mgr->lock );
    350 
    351         printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n",
    352         __FUNCTION__, npages , local_cxy );
    353 
    354355        return NULL;
    355356    }
     
    368369            if( new_vseg == NULL )
    369370        {
    370                 // release lock protecting free lists
     371
     372#if DEBUG_VMM_ERROR
     373printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     374__FUNCTION__ , local_cxy );
     375#endif
    371376            busylock_release( &mgr->lock );
    372 
    373             printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
    374             __FUNCTION__ , local_cxy );
    375 
    376377            return NULL;
    377378            }
     
    517518                    XPTR( local_cxy , &vseg->xlist ) );
    518519
    519 }  // end vmm_attach_vseg_from_vsl()
     520}  // end vmm_attach_vseg_to_vsl()
    520521
    521522////////////////////////////////////////////////////////////////////////////////////////////
     
    537538    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    538539
    539 }  // end vmm_detach_from_vsl()
     540}  // end vmm_detach_vseg_from_vsl()
    540541
    541542////////////////////////////////////////////
     
    12901291            if( child_vseg == NULL )   // release all allocated vsegs
    12911292            {
     1293
     1294#if DEBUG_VMM_ERROR
     1295printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n",
     1296__FUNCTION__, local_cxy );
     1297#endif
    12921298                vmm_destroy( child_process );
    1293                 printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
    12941299                return -1;
    12951300            }
     
    13381343                    if( error )
    13391344                    {
     1345
     1346#if DEBUG_VMM_ERROR
     1347printk("\n[ERROR] in %s : cannot copy GPT\n",
     1348__FUNCTION__ );
     1349#endif
    13401350                        vmm_destroy( child_process );
    1341                         printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
    13421351                        return -1;
    13431352                    }
     
    13571366    remote_queuelock_release( parent_lock_xp );
    13581367
    1359 /* deprecated [AG] : this is already done by the vmm_user_init() funcfion
    1360 
    1361     // initialize the child VMM STACK allocator
    1362     vmm_stack_init( child_vmm );
    1363 
    1364     // initialize the child VMM MMAP allocator
    1365     vmm_mmap_init( child_vmm );
    1366 
    1367     // initialize instrumentation counters
    1368         child_vmm->false_pgfault_nr    = 0;
    1369         child_vmm->local_pgfault_nr    = 0;
    1370         child_vmm->global_pgfault_nr   = 0;
    1371         child_vmm->false_pgfault_cost  = 0;
    1372         child_vmm->local_pgfault_cost  = 0;
    1373         child_vmm->global_pgfault_cost = 0;
    1374 */
    13751368    // copy base addresses from parent VMM to child VMM
    13761369    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
     
    15641557        if( vseg == NULL )
    15651558        {
    1566             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1567             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1559
     1560#if DEBUG_VMM_ERROR
     1561printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1562__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1563#endif
    15681564            return NULL;
    15691565        }
     
    15721568        vseg->type = type;
    15731569        vseg->vmm  = vmm;
    1574         vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1575         vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1570        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
     1571        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
    15761572        vseg->cxy  = cxy;
    15771573
     
    15821578    {
    15831579        // compute page index (in mapper) for first and last byte
    1584         vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
    1585         vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1580        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_ORDER;
     1581        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER;
    15861582
    15871583        // compute offset in first page and number of pages
     
    15941590        if( vseg == NULL )
    15951591        {
    1596             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1597             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1592
     1593#if DEBUG_VMM_ERROR
     1594printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1595__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1596#endif
    15981597            return NULL;
    15991598        }
     
    16021601        vseg->type        = type;
    16031602        vseg->vmm         = vmm;
    1604         vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
     1603        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset;
    16051604        vseg->max         = vseg->min + size;
    16061605        vseg->file_offset = file_offset;
     
    16151614    {
    16161615        // compute number of required pages in virtual space
    1617         vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
     1616        vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER;
    16181617        if( size & CONFIG_PPM_PAGE_MASK) npages++;
    16191618       
     
    16231622        if( vseg == NULL )
    16241623        {
    1625             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1626             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1624
     1625#if DEBUG_VMM_ERROR
     1626printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1627__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1628#endif
    16271629            return NULL;
    16281630        }
     
    16311633        vseg->type = type;
    16321634        vseg->vmm  = vmm;
    1633         vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1634         vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1635        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
     1636        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
    16351637        vseg->cxy  = cxy;
    16361638
     
    16401642    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    16411643    {
    1642         uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
    1643         uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1644        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER;
     1645        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER;
    16441646
    16451647        // allocate vseg descriptor
     
    16481650            if( vseg == NULL )
    16491651            {
    1650             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1651             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1652
     1653#if DEBUG_VMM_ERROR
     1654printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1655__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1656#endif
    16521657            return NULL;
    16531658            }
     1659
    16541660        // initialize vseg
    16551661        vseg->type        = type;
     
    16571663        vseg->min         = base;
    16581664        vseg->max         = base + size;
    1659         vseg->vpn_base    = base >> CONFIG_PPM_PAGE_SHIFT;
     1665        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_ORDER;
    16601666        vseg->vpn_size    = vpn_max - vpn_min + 1;
    16611667        vseg->file_offset = file_offset;
     
    16721678    if( existing_vseg != NULL )
    16731679    {
    1674         printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
    1675                "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
    1676         __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
    1677         vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
     1680
     1681#if DEBUG_VMM_ERROR
     1682printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
     1683       "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
     1684__FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
     1685vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
     1686#endif
    16781687        vseg_free( vseg );
    16791688        return NULL;
     
    18011810    if( do_kmem_release )
    18021811    {
    1803         kmem_req_t req;
    1804         req.type = KMEM_PPM;
    1805         req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
    1806 
    1807         kmem_remote_free( page_cxy , &req );
     1812        // get physical page order
     1813        uint32_t order = CONFIG_PPM_PAGE_ORDER +
     1814                         hal_remote_l32( XPTR( page_cxy , &page_ptr->order ));
     1815
     1816        // get physical page base
     1817        void * base = GET_PTR( ppm_ppn2base( ppn ) );
     1818
     1819        // release physical page
     1820        kmem_remote_free( page_cxy , base , order );
    18081821
    18091822#if DEBUG_VMM_PPN_RELEASE
     
    18551868#endif
    18561869
    1857     // loop on PTEs in GPT to unmap all mapped PTE
    1858         for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
     1870    // the loop on PTEs in GPT to unmap all mapped PTEs
     1871    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    18591872    {
    18601873        // get ppn and attr
     
    19421955    intptr_t min          = new_base;
    19431956    intptr_t max          = new_base + new_size;
    1944     vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_SHIFT;
    1945     vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1957    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_ORDER;
     1958    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_ORDER;
    19461959
    19471960    // build extended pointer on GPT
     
    20822095        if( ref_cxy == local_cxy )    // local is ref => return error
    20832096        {
    2084             printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
    2085             __FUNCTION__, vaddr, process->pid );
    2086 
    2087             // release local VSL lock
     2097
     2098#if DEBUG_VMM_ERROR
     2099printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     2100__FUNCTION__, vaddr, process->pid );
     2101#endif
    20882102            remote_queuelock_release( loc_lock_xp );
    2089 
    20902103            return -1;
    20912104        }
     
    21032116            if( ref_vseg == NULL )  // vseg not found => return error
    21042117            {
    2105                 // release both VSL locks
     2118
     2119#if DEBUG_VMM_ERROR
     2120printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     2121__FUNCTION__, vaddr, process->pid );
     2122#endif
    21062123                remote_queuelock_release( loc_lock_xp );
    21072124                remote_queuelock_release( ref_lock_xp );
    2108 
    2109                 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
    2110                 __FUNCTION__, vaddr, process->pid );
    2111 
    21122125                return -1;
    21132126            }
     
    21192132                if( loc_vseg == NULL )   // no memory => return error
    21202133                {
    2121                     printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
    2122                     __FUNCTION__, vaddr, process->pid );
    2123 
    2124                     // release both VSL locks
     2134
     2135#if DEBUG_VMM_ERROR
     2136printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
     2137__FUNCTION__, vaddr, process->pid );
     2138#endif
    21252139                    remote_queuelock_release( ref_lock_xp );
    21262140                    remote_queuelock_release( loc_lock_xp );
    2127 
    21282141                    return -1;
    21292142                }
     
    21582171//////////////////////////////////////////////////////////////////////////////////////
    21592172// This static function compute the target cluster to allocate a physical page
    2160 // for a given <vpn> in a given <vseg>, allocates the page and returns an extended
    2161 // pointer on the allocated page descriptor.
     2173// for a given <vpn> in a given <vseg>, allocates the physical page from a local
     2174// or remote cluster (depending on the vseg type), and returns an extended pointer
     2175// on the allocated page descriptor.
    21622176// The vseg cannot have the FILE type.
    21632177//////////////////////////////////////////////////////////////////////////////////////
    21642178// @ vseg   : local pointer on vseg.
    21652179// @ vpn    : unmapped vpn.
    2166 // @ return an extended pointer on the allocated page descriptor.
     2180// @ return xptr on page descriptor if success / return XPTR_NULL if failure
    21672181//////////////////////////////////////////////////////////////////////////////////////
    21682182static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    22072221    }
    22082222
    2209     // allocate one small physical page from target cluster
    2210     kmem_req_t req;
    2211     req.type  = KMEM_PPM;
    2212     req.order = 0;
    2213     req.flags = AF_ZERO;
    2214 
    22152223    // get local pointer on page base
    2216     void * ptr = kmem_remote_alloc( page_cxy , &req );
    2217 
     2224    void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );
     2225
     2226    if( ptr == NULL )
     2227    {
     2228
     2229#if DEBUG_VMM_ERROR
     2230printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n",
     2231__FUNCTION__, page_cxy );
     2232#endif
     2233        return XPTR_NULL;
     2234    }     
    22182235    // get extended pointer on page descriptor
    22192236    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
     
    22912308       
    22922309            // compute missing page offset in vseg
    2293             uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
     2310            uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER;
    22942311
    22952312            // compute missing page offset in .elf file
     
    24272444    // get local vseg (access to reference VSL can be required)
    24282445    error = vmm_get_vseg( process,
    2429                           (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
     2446                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
    24302447                          &vseg );
    24312448    if( error )
     
    27522769    // get local vseg
    27532770    error = vmm_get_vseg( process,
    2754                           (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
     2771                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
    27552772                          &vseg );
    27562773    if( error )
  • trunk/kernel/mm/vseg.c

    r672 r683  
    6262vseg_t * vseg_alloc( void )
    6363{
    64     kmem_req_t   req;
    65 
    66     req.type  = KMEM_KCM;
    67         req.order = bits_log2( sizeof(vseg_t) );
    68         req.flags = AF_KERNEL | AF_ZERO;
    69 
    70     return kmem_alloc( &req );
     64    return (vseg_t*)kmem_alloc( bits_log2( sizeof(vseg_t)) , AF_ZERO );
    7165}
    7266
     
    7468void vseg_free( vseg_t * vseg )
    7569{
    76     kmem_req_t  req;
    77 
    78         req.type = KMEM_KCM;
    79         req.ptr  = vseg;
    80         kmem_free( &req );
     70        kmem_free( vseg , bits_log2( sizeof(vseg_t)) );
    8171}
    8272
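The change above shows the new kmem interface for small kernel objects: the caller now passes the size order and the allocation flags directly, instead of filling a kmem_req_t request. A minimal sketch of the idiom, for a hypothetical kernel structure my_obj_t:

    // sketch only : allocate / release a zeroed kernel object with the new kmem API
    my_obj_t * obj = kmem_alloc( bits_log2( sizeof(my_obj_t) ) , AF_ZERO );
    if( obj != NULL )
    {
        /* ... use obj ... */
        kmem_free( obj , bits_log2( sizeof(my_obj_t) ) );
    }
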
  • trunk/kernel/mm/vseg.h

    r657 r683  
    8282    vpn_t             vpn_base;     /*! first page of vseg                                */
    8383    vpn_t             vpn_size;     /*! number of pages occupied                          */
     84    xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)  */
    8485    uint32_t          flags;        /*! vseg attributes                                   */
    85     xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)  */
    8686    intptr_t          file_offset;  /*! vseg offset in file (for types CODE/DATA/FILE)    */
    8787    intptr_t          file_size;    /*! max segment size in mapper (for type CODE/DATA)   */
  • trunk/kernel/syscalls/shared_include/shared_almos.h

    r670 r683  
    22 * shared_almos.h - Shared mnemonics used by the almos-mkh specific syscalls.
    33 *
    4  * Author  Alain Greiner (2016,2017,2018)
     4 * Author  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    6262
    6363/*******************************************************************************************
     64 * This enum defines the operation mnemonics for the non standard get_xxx() syscalls.
     65 ******************************************************************************************/
     66
     67typedef enum
     68{
     69    GET_PROCESSES    = 0,     
     70    GET_CONFIG       = 1,
     71    GET_CORE_ID      = 2,
     72    GET_NB_CORES     = 3,
     73    GET_BEST_CORE    = 4,
     74    GET_CYCLE        = 5,
     75        GET_THREAD_INFO  = 6,
     76}
     77get_operation_type_t;
     78
     79/*******************************************************************************************
    6480 * This structure defines the - user accessible - information stored in a thread.
    6581 ******************************************************************************************/
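This new enum gives one mnemonic per operation of the consolidated get_xxx() syscall, which replaces the former per-operation syscalls. A purely illustrative dispatch skeleton (not the actual sys_get() code, whose real prototype is not shown in this changeset):

    // hypothetical sketch : the consolidated syscall selects the operation from the mnemonic
    int sys_get( uint32_t operation , intptr_t arg0 , intptr_t arg1 )
    {
        switch( operation )
        {
            case GET_CONFIG      :  /* copy platform configuration to user buffer */  return 0;
            case GET_CORE_ID     :  /* return calling core identifier */              return 0;
            case GET_CYCLE       :  /* return current cycle counter */                return 0;
            case GET_PROCESSES   :
            case GET_NB_CORES    :
            case GET_BEST_CORE   :
            case GET_THREAD_INFO :  /* other operations handled similarly */          return 0;
            default              :  return -1;
        }
    }
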
  • trunk/kernel/syscalls/shared_include/shared_dirent.h

    r611 r683  
    11/*
    2  * shared_dirent.h - Shared structure used by the opendir() / readdir() / closedir() syscalls.
     2 * shared_dirent.h - structures used by the opendir() / readdir() / closedir() syscalls.
    33 *
    4  * Author  Alain Greiner (2016,2017,2018)
     4 * Author  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/shared_include/shared_socket.h

    r670 r683  
    6969    SOCK_SEND        = 5,
    7070    SOCK_RECV        = 6,
     71    SOCK_SENDTO      = 7,
     72    SOCK_RECVFROM    = 8,
    7173}
    7274socket_operation_type_t;
  • trunk/kernel/syscalls/shared_include/syscalls_numbers.h

    r657 r683  
    22 * syscalls_numbers.c - Contains enum of the syscalls.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018,2019)
     4 * Author    Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2727/******************************************************************************************
    2828 * This enum defines the mnemonics for the syscall indexes.
    29  * It must be kept consistent with the array defined in do_syscalls.c
     29 * It must be kept consistent with the array defined in the <do_syscalls.c> file
     30 * and with the SYS_OBJs defined in the kernel <Makefile>
    3031 *****************************************************************************************/
    3132typedef enum
     
    7576    SYS_WAIT            = 39,
    7677
    77     SYS_GET_CONFIG      = 40,
    78     SYS_GET_CORE_ID     = 41,
    79     SYS_GET_CYCLE       = 42,
    80     SYS_DISPLAY         = 43,
    81     SYS_PLACE_FORK      = 44, 
    82     SYS_THREAD_SLEEP    = 45,
    83     SYS_THREAD_WAKEUP   = 46,
    84     SYS_TRACE           = 47,
    85     SYS_FG              = 48,
    86     SYS_IS_FG           = 49,
     78    SYS_GET             = 40,
     79    SYS_DISPLAY         = 41,
     80    SYS_PLACE_FORK      = 42, 
     81    SYS_THREAD_SLEEP    = 43,
     82    SYS_THREAD_WAKEUP   = 44,
     83    SYS_TRACE           = 45,
     84    SYS_FG              = 46,
     85    SYS_IS_FG           = 47,
     86    SYS_FBF             = 48,
     87    SYS_UNDEFINED_49    = 49,
    8788
    8889    SYS_EXIT            = 50,
    8990    SYS_SYNC            = 51,
    9091    SYS_FSYNC           = 52,
    91     SYS_GET_BEST_CORE   = 53,
    92     SYS_GET_NB_CORES    = 54,
    93     SYS_GET_THREAD_INFO = 55,
    94     SYS_FBF             = 56,
    95     SYS_SOCKET          = 57,
     92    SYS_SOCKET          = 53,
    9693
    97     SYSCALLS_NR         = 58,
     94    SYSCALLS_NR         = 54,
    9895
    9996} syscalls_t;
  • trunk/kernel/syscalls/sys_alarm.c

    r506 r683  
    33 *
    44 * Author    Alain Greiner (2016,2017)
    5 *
     5 *
    66 * Copyright (c) UPMC Sorbonne Universites
    77 *
  • trunk/kernel/syscalls/sys_barrier.c

    r670 r683  
    22 * sys_barrier.c - Access a POSIX barrier.
    33 *
    4  * authors       Alain Greiner (2016,2017,2018,2019)
     4 * authors       Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3333#include <remote_barrier.h>
    3434
     35/////////////////////////////////////////////////////////////////////////////////
     36// This function returns a printable string for the barrier related command type.
     37/////////////////////////////////////////////////////////////////////////////////
     38
      39#if DEBUG_SYS_BARRIER || DEBUG_SYSCALLS_ERROR
     40static char* barrier_cmd_str( uint32_t type )
     41{
     42    if     ( type == BARRIER_INIT     ) return "INIT";
     43    else if( type == BARRIER_WAIT     ) return "WAIT";
     44    else if( type == BARRIER_DESTROY  ) return "DESTROY";
     45    else                                return "undefined";
     46}
     47#endif
     48
    3549//////////////////////////////////
    3650int sys_barrier( intptr_t   vaddr,
     
    5367if( DEBUG_SYS_BARRIER < tm_start )
    5468printk("\n[%s] thread[%x,%x] enters for %s / count %d / cycle %d\n",
    55 __FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation), count,
     69__FUNCTION__, process->pid, this->trdid, barrier_cmd_str(operation), count,
    5670(uint32_t)tm_start );
    5771#endif
    5872
    5973    // check vaddr in user vspace
    60         error = vmm_get_vseg( process , vaddr , &vseg );
    61         if( error )
     74        if( vmm_get_vseg( process , vaddr , &vseg ) )
    6275    {
    6376
     
    6578if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    6679printk("\n[ERROR] in %s for %s : unmapped barrier %x / thread[%x,%x]\n",
    67 __FUNCTION__, sys_barrier_op_str(operation), vaddr, process->pid, this->trdid );
    68 #endif
    69         this->errno = error;
     80__FUNCTION__, barrier_cmd_str(operation), vaddr, process->pid, this->trdid );
     81#endif
     82        this->errno = EINVAL;
    7083        return -1;
    7184    }
     
    7992            if( attr != 0 )   // QDT barrier required
    8093            {
    81                 error = vmm_get_vseg( process , attr , &vseg );
    82                 if( error )
     94                if( vmm_get_vseg( process , attr , &vseg ) )
    8395                {
    8496
  • trunk/kernel/syscalls/sys_display.c

    r670 r683  
    529529
    530530            // display socket descriptor on TXT0
    531             socket_display( XPTR( file_cxy , socket ), NULL );
     531            socket_display( XPTR( file_cxy , socket ), __FUNCTION__ , NULL );
    532532
    533533            break;
  • trunk/kernel/syscalls/sys_exec.c

    r670 r683  
    3838#include <syscalls.h>
    3939
     40////////////////////////////////////////////////i////////////////////////////////////////
     41// This static function is called twice by the sys_exec() function :
     42// - to register the main() arguments (args) in the process <exec_info> structure.
     43// - to register the environment variables (envs) in the <exec_info> structure.
     44// In both cases the input is an array of NULL terminated string pointers in user space,
     45// identified by the <u_pointers> argument. The strings can be dispatched anywhere in
     46// the calling user process space. The max number of envs, and the max number of args 
     47// are defined by the CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
     48////////////////////////////////////////////////i////////////////////////////////////////
     49// Implementation Note:
     50// Both the array of pointers and the strings themselve are stored in kernel space in one
     51// single, dynamically allocated, kernel buffer containing an integer number of pages,
     52// defined by the CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters.
     53// These two kernel buffers contains :
     54// - in the first bytes a fixed size kernel array of kernel pointers on the strings.
     55// - in the following bytes the strings themselves.
     56// The exec_info_t structure is defined in the <process.h> file.
     57////////////////////////////////////////////////i////////////////////////////////////////
     58// @ is_args     : [in]    true if called for (args) / false if called for (envs).
     59// @ u_pointers  : [in]    array of pointers on the strings (in user space).
     60// @ exec_info   : [inout] pointer on the exec_info structure.
     61// @ return 0 if success / non-zero if too many strings or no memory.
     62////////////////////////////////////////////////i////////////////////////////////////////
     63static error_t exec_get_strings( bool_t         is_args,
     64                                 char        ** u_pointers,
     65                                 exec_info_t  * exec_info )
     66{
     67    uint32_t     index;           // slot index in pointers array
     68    uint32_t     length;          // string length (in bytes)
     69    uint32_t     pointers_bytes;  // number of bytes to store pointers
     70    uint32_t     max_index;       // max size of pointers array
     71    char      ** k_pointers;      // base of kernel array of pointers
     72    char       * k_buf_ptr;       // pointer on first empty slot in strings buffer
     73    uint32_t     k_buf_space;     // number of bytes available in string buffer
     74    char       * k_buf;           // kernel buffer for both pointers & strings
     75
     76#if DEBUG_SYS_EXEC
     77thread_t * this  = CURRENT_THREAD;
     78uint32_t   cycle = (uint32_t)hal_get_cycles();
     79#endif
     80
     81    // Allocate one block of physical memory for both the pointers and the strings
     82
     83    if( is_args )
     84    {
     85        k_buf = kmem_alloc( bits_log2(CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER), AF_ZERO );
     86
     87        pointers_bytes = (CONFIG_PROCESS_ARGS_MAX_NR + 1) * sizeof(char *);
     88        k_pointers     = (char **)k_buf;
     89        k_buf_ptr      = k_buf + pointers_bytes;
     90        k_buf_space    = (CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER) - pointers_bytes;
     91        max_index      = CONFIG_PROCESS_ARGS_MAX_NR + 1;
     92
     93#if DEBUG_SYS_EXEC
     94if( DEBUG_SYS_EXEC < cycle )
     95printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n",
     96__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
     97#endif
     98
     99    }
     100    else  // envs
     101    {
     102        k_buf = kmem_alloc( bits_log2(CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER), AF_ZERO );
     103
     104        pointers_bytes = (CONFIG_PROCESS_ENVS_MAX_NR + 1) * sizeof(char *);
     105        k_pointers     = (char **)k_buf;
     106        k_buf_ptr      = k_buf + pointers_bytes;
     107        k_buf_space    = (CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER) - pointers_bytes;
     108        max_index      = CONFIG_PROCESS_ENVS_MAX_NR + 1;
     109
     110#if DEBUG_SYS_EXEC
     111if( DEBUG_SYS_EXEC < cycle )
     112printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n",
     113__FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf );
     114#endif
     115
     116    }
     117
     118    // copy the user array of pointers to kernel buffer
     119    hal_copy_from_uspace( XPTR( local_cxy , k_pointers ),
     120                          u_pointers,
     121                          pointers_bytes );
     122
     123    // WARNING : the pointers copied in the k_pointers[] array are user pointers,
     124    // after the loop below, the k_pointers[] array contains kernel pointers.
     125
     126#if DEBUG_SYS_EXEC
     127if( DEBUG_SYS_EXEC < cycle )
     128printk("\n[%s] thread[%x,%x] moved u_ptr array of pointers to k_ptr array\n",
     129__FUNCTION__, this->process->pid, this->trdid );
     130#endif
     131
     132    // scan kernel array of pointers to copy strings to kernel buffer
     133    for( index = 0 ; index < max_index ; index++ )
     134    {
      135        // exit loop if (k_pointers[index] == NULL)
     136        if( k_pointers[index] == NULL ) break;
     137
     138        // compute string length (without the NUL character)
     139        length = hal_strlen_from_uspace( k_pointers[index] );
     140
     141        // return error if overflow in kernel buffer
     142        if( length > k_buf_space ) return -1;
     143
     144        // copy the string itself to kernel buffer
     145        hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ),
     146                              k_pointers[index],
     147                              length + 1 );
     148
     149#if DEBUG_SYS_EXEC
     150if( DEBUG_SYS_EXEC < cycle )
     151printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n",
     152__FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length );
     153#endif
     154
     155        // replace the user pointer by a kernel pointer in the k_pointer[] array
     156        k_pointers[index] = k_buf_ptr;
     157
     158        // increment loop variables
     159        k_buf_ptr   += (length + 1);
     160        k_buf_space -= (length + 1);
     161
     162#if DEBUG_SYS_EXEC
     163if( DEBUG_SYS_EXEC < cycle )
     164{
     165    if( k_pointers[0] != NULL )
     166    printk("\n[%s] thread[%x,%x] : &arg0 = %x / arg0 = <%s>\n",
     167    __FUNCTION__, this->process->pid, this->trdid, k_pointers[0], k_pointers[0] );
     168    else
     169    printk("\n[%s] thread[%x,%x] : unexpected NULL value for &arg0\n",
     170    __FUNCTION__, this->process->pid, this->trdid );
     171}
     172#endif
     173
     174    }  // end loop on index
     175
     176    // update into exec_info structure
     177    if( is_args )
     178    {
     179        exec_info->args_pointers  =  k_pointers;
     180        exec_info->args_nr        =  index;
     181    }
     182    else
     183    {
     184        exec_info->envs_pointers  =  k_pointers;
     185        exec_info->envs_buf_free  =  k_buf_ptr;
     186        exec_info->envs_nr        =  index;
     187    }
     188
     189#if DEBUG_SYS_EXEC
     190if( DEBUG_SYS_EXEC < cycle )
     191printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n",
     192__FUNCTION__, this->process->pid, this->trdid, index );
     193#endif
     194
     195    return 0;
     196
     197} // end exec_get_strings()
     198
     199
    40200///////////////////////////////
    41 int sys_exec( char  * pathname,       // .elf file pathname in user space
    42               char ** user_args,      // pointer on process arguments in user space
    43               char ** user_envs )     // pointer on env variables in user space
     201int sys_exec( char  * pathname,    // .elf file pathname in user space
     202              char ** user_args,   // pointer on array of process arguments in user space
     203              char ** user_envs )  // pointer on array of env variables in user space
    44204{
    45205    error_t       error;
     
    96256
    97257#if DEBUG_SYSCALLS_ERROR
     258if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    98259printk("\n[ERROR] in %s for thread[%x,%] : user_args pointer %x unmapped\n",
    99260__FUNCTION__, pid, trdid, user_args );
     
    115276                return -1;
    116277        }
    117 
    118 #if DEBUG_SYS_EXEC
    119 if( DEBUG_SYS_EXEC < (uint32_t)tm_start )
    120 printk("\n[%s] thread[%x,%x] enter / path <%s> / args %x / envs %x / cycle %d\n",
    121 __FUNCTION__, pid, trdid, &process->exec_info.path[0], user_args, user_envs, cycle );
    122 #endif
    123278
    124279    // 1. copy "pathname" in kernel exec_info structure
     
    127282                            CONFIG_VFS_MAX_PATH_LENGTH );
    128283
     284#if DEBUG_SYS_EXEC
     285if( DEBUG_SYS_EXEC < (uint32_t)tm_start )
     286printk("\n[%s] thread[%x,%x] enter / path <%s> / args %x / envs %x / cycle %d\n",
     287__FUNCTION__, pid, trdid, &process->exec_info.path[0],
     288user_args, user_envs, (uint32_t)tm_start );
     289#endif
     290
    129291    // 2. copy "arguments" pointers & strings in process exec_info if required
    130292    if( user_args != NULL )
    131293    {
    132         if( process_exec_get_strings( true , user_args , &process->exec_info ) )
     294        if( exec_get_strings( true , user_args , &process->exec_info ) )
    133295        {
    134296
    135297#if DEBUG_SYSCALLS_ERROR
    136298if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    137 printk("\n[ERROR] in %s : thread[%x,%] get arguments for <%s>\n",
      299printk("\n[ERROR] in %s : thread[%x,%x] cannot get arguments for <%s>\n",
    138300__FUNCTION__, pid, trdid, pathname );
    139301#endif
     
    144306#if DEBUG_SYS_EXEC
    145307if( DEBUG_SYS_EXEC < (uint32_t)tm_start )
    146 printk("\n[%s] thread[%x,%x] got arguments / arg[0] = <%s>\n",
     308printk("\n[%s] thread[%x,%x] set arguments in exec_info / arg[0] = <%s>\n",
    147309__FUNCTION__, pid, trdid, process->exec_info.args_pointers[0] );
    148310#endif
     
    153315    if( user_envs != NULL )
    154316    {
    155         if( process_exec_get_strings( false , user_envs , &process->exec_info ) )
     317        if( exec_get_strings( false , user_envs , &process->exec_info ) )
    156318        {
    157319
    158320#if DEBUG_SYSCALLS_ERROR
    159321if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    160 printk("\n[ERROR] in %s : thread[%x,%] get env variables for <%s>\n",
      322printk("\n[ERROR] in %s : thread[%x,%x] cannot get env variables for <%s>\n",
    161323__FUNCTION__, pid, trdid, pathname );
    162324#endif
     
    167329#if DEBUG_SYS_EXEC
    168330if( DEBUG_SYS_EXEC < (uint32_t)tm_start )
    169 printk("\n[%s] thread[%x,%x] got envs / env[0] = <%s>\n",
     331printk("\n[%s] thread[%x,%x] set envs in exec_info / env[0] = <%s>\n",
    170332__FUNCTION__, pid, trdid, process->exec_info.envs_pointers[0] );
    171333#endif
  • trunk/kernel/syscalls/sys_kill.c

    r664 r683  
    22 * sys_kill.c - Kernel function implementing the "kill" system call.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    6767    process_t * process = this->process;
    6868
    69 #if (DEBUG_SYS_KILL || CONFIG_INSTRUMENTATION_SYSCALLS)
     69#if DEBUG_SYS_KILL || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS
    7070uint64_t     tm_start = hal_get_cycles();
    7171#endif
    7272
    7373#if DEBUG_SYS_KILL
    74 tm_start = hal_get_cycles();
    7574if( DEBUG_SYS_KILL < tm_start )
    7675printk("\n[%s] thread[%x,%x] enter : %s to process %x / cycle %d\n",
     
    9594
    9695#if DEBUG_SYSCALLS_ERROR
    97 printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__, pid );
     96if( DEBUG_SYSCALLS_ERROR < tm_start )
     97printk("\n[ERROR] in %s : thread[%x,%x] / process %x not found\n",
     98__FUNCTION__, process->pid, this->trdid, pid );
    9899#endif
    99100        this->errno = EINVAL;
     
    175176
    176177#if DEBUG_SYSCALLS_ERROR
    177 printk("\n[ERROR] in %s : process %x cannot kill itself\n", __FUNCTION__, pid );
     178if( DEBUG_SYSCALLS_ERROR < tm_start )
     179printk("\n[ERROR] in %s : thread[%x,%x] / process %x cannot kill itself\n",
     180__FUNCTION__, process->pid, this->trdid, pid );
    178181#endif
    179182                this->errno = EINVAL;
     
    186189
    187190#if DEBUG_SYSCALLS_ERROR
    188 printk("\n[ERROR] in %s : process_init cannot be killed\n", __FUNCTION__ );
     191if( DEBUG_SYSCALLS_ERROR < tm_start )
     192printk("\n[ERROR] in %s : thread[%x,%x] / process_init cannot be killed\n",
     193 __FUNCTION__, process->pid, this->trdid);
    189194#endif
    190195                        this->errno = EINVAL;
     
    219224
    220225#if DEBUG_SYSCALLS_ERROR
    221 printk("\n[ERROR] in %s : illegal signal %d / process %x\n", __FUNCTION__, sig_id, pid );
     226if( DEBUG_SYSCALLS_ERROR < tm_start )
     227printk("\n[ERROR] in %s : thread[%x,%x] / illegal signal %d\n",
     228__FUNCTION__, process->pid, this->trdid, sig_id );
    222229#endif
    223230            this->errno = EINVAL;
     
    234241#if DEBUG_SYS_KILL
    235242if( DEBUG_SYS_KILL < tm_end )
    236 printk("\n[%s] thread[%x,%x] exit / process %x / %s / cost = %d / cycle %d\n",
     243printk("\n[%s] thread[%x,%x] exit / process %x / %s / cycle %d\n",
    237244__FUNCTION__ , this->process->pid, this->trdid, pid,
    238 sig_type_str(sig_id), (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     245sig_type_str(sig_id), (uint32_t)tm_end );
    239246#endif
    240247
  • trunk/kernel/syscalls/sys_opendir.c

    r670 r683  
    6565
    6666#if DEBUG_SYSCALLS_ERROR
    67 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    68 printk("\n[ERROR] in %s : thread[%x,%x] / DIR buffer %x unmapped\n",
    69 __FUNCTION__ , process->pid , this->trdid, dirp );
     67printk("\n[ERROR] in %s : thread[%x,%x] / DIR buffer %x unmapped / cycle %d\n",
     68__FUNCTION__ , process->pid , this->trdid, dirp, (uint32_t)tm_start );
    7069#endif
    7170                this->errno = EINVAL;
     
    8079
    8180#if DEBUG_SYSCALLS_ERROR
    82 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    83 printk("\n[ERROR] in %s : thread[%x,%x] / pathname %x unmapped\n",
    84 __FUNCTION__ , process->pid , this->trdid, pathname );
     81printk("\n[ERROR] in %s : thread[%x,%x] / pathname %x unmapped / cycle %d\n",
     82__FUNCTION__ , process->pid , this->trdid, pathname, (uint32_t)tm_start );
    8583#endif
    8684                this->errno = EINVAL;
     
    9290
    9391#if DEBUG_SYSCALLS_ERROR
    94 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    95 printk("\n[ERROR] in %s / thread[%x,%x] : pathname too long\n",
    96  __FUNCTION__ , process->pid , this->trdid );
     92printk("\n[ERROR] in %s / thread[%x,%x] : pathname too long / cycle %d\n",
     93 __FUNCTION__ , process->pid , this->trdid, (uint32_t)tm_start );
    9794#endif
    9895        this->errno = ENFILE;
     
    138135
    139136#if DEBUG_SYSCALLS_ERROR
    140 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    141 printk("\n[ERROR] in %s : thread[%x,%x] / cannot found directory <%s>\n",
    142 __FUNCTION__ , process->pid , this->trdid , kbuf );
      137printk("\n[ERROR] in %s : thread[%x,%x] / cannot find directory <%s> / cycle %d\n",
     138__FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start );
    143139#endif
    144140                this->errno = ENFILE;
     
    155151
    156152#if DEBUG_SYSCALLS_ERROR
    157 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    158 printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory\n",
    159 __FUNCTION__ , process->pid , this->trdid , kbuf );
     153printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory / cycle %d\n",
     154__FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start );
    160155#endif
    161156                this->errno = ENFILE;
     
    163158        }
    164159   
    165     // create a new user_dir_t structure in target directory inode cluster
     160    // create an user_dir_t structure in cluster containing directory inode
    166161    // map it in the reference user process VMM (in a new ANON vseg)
    167162    // an get the local pointer on the created user_dir_t structure
     
    183178
    184179#if DEBUG_SYSCALLS_ERROR
    185 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    186 printk("\n[ERROR] in %s : thread[%x,%x] / cannot create user_dir for <%s>\n",
    187 __FUNCTION__ , process->pid , this->trdid , kbuf );
     180printk("\n[ERROR] in %s : thread[%x,%x] / cannot create user_dir for <%s> / cycle %d\n",
     181__FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start );
    188182#endif
    189183                this->errno = ENFILE;
  • trunk/kernel/syscalls/sys_pipe.c

    r670 r683  
    3636{
    3737    vseg_t       * vseg;
    38     kmem_req_t     req;
    3938    pipe_t       * pipe;
    4039    vfs_file_t   * file_0;
     
    8887    // 2. allocate memory for fd[0] file descriptor in local cluster
    8988    // we don't use the vfs_file_create function because there is no inode.
    90         req.type  = KMEM_KCM;
    91         req.order = bits_log2( sizeof(vfs_file_t) );
    92     req.flags = AF_ZERO;
    93         file_0    = kmem_alloc( &req );
     89        file_0 = kmem_alloc( bits_log2(sizeof(vfs_file_t)) , AF_ZERO );
    9490
    9591    if( file_0 == NULL )
     
    120116
    121117    // 4. allocate memory for fd[1] file descriptor in local cluster
    122         req.type  = KMEM_KCM;
    123         req.order = bits_log2( sizeof(vfs_file_t) );
    124     req.flags = AF_ZERO;
    125         file_1    = kmem_alloc( &req );
     118    // we don't use the vfs_file_create function because there is no inode.
     119        file_1 = kmem_alloc( bits_log2(sizeof(vfs_file_t)) , AF_ZERO );
    126120
    127121    if( file_1 == NULL )
     
    178172error_5:    // release memory allocated for fd[1] file descriptor
    179173
    180     req.ptr = file_1;
    181     kmem_free( &req );
     174    kmem_free( file_1 , bits_log2(sizeof(vfs_file_t)) );
    182175
    183176error_4:    // release fdid_0 from fd_array[]
     
    187180error_3:    // release memory allocated for fd[0] file descriptor
    188181
    189     req.ptr = file_0;
    190     kmem_free( &req );
     182    kmem_free( file_0 , bits_log2(sizeof(vfs_file_t)) );
    191183
    192184error_2:    // release memory allocated for the pipe
  • trunk/kernel/syscalls/sys_place_fork.c

    r670 r683  
    5555       
    5656#if DEBUG_SYSCALLS_ERROR
    57 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start );
     57if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     5858printk("\n[ERROR] in %s : thread[%x,%x] / illegal cxy argument %x\n",
    5959__FUNCTION__ , process->pid , this->trdid , cxy );
  • trunk/kernel/syscalls/sys_socket.c

    r670 r683  
    4545
    4646#if DEBUG_SYS_SOCKET
    47 static char* socket_cmd_type_str( uint32_t type )
     47static char* socket_user_cmd_str( uint32_t type )
    4848{
    4949    if     ( type == SOCK_CREATE      ) return "CREATE";
     
    5454    else if( type == SOCK_SEND        ) return "SEND";
    5555    else if( type == SOCK_RECV        ) return "RECV";
     56    else if( type == SOCK_SENDTO      ) return "SENDTO";
     57    else if( type == SOCK_RECVFROM    ) return "RECVFROM";
    5658    else                                return "undefined";
    5759}
     
    7981
    8082#if DEBUG_SYS_SOCKET
     83char kbuf[64];
    8184if( DEBUG_SYS_SOCKET < (uint32_t)tm_start )
    82 printk("\n[%s] thread[%x,%x] enter / %s / a1 %x / a2 %x / a3 %x / cycle %d\n",
    83 __FUNCTION__, process->pid, this->trdid, socket_cmd_type_str(cmd),
     85printk("\n[%s] thread[%x,%x] enter for %s / a1 %x / a2 %x / a3 %x / cycle %d\n",
     86__FUNCTION__, process->pid, this->trdid, socket_user_cmd_str(cmd),
    8487arg1, arg2, arg3, (uint32_t)tm_start );
    8588#endif
     
    97100
    98101#if DEBUG_SYSCALLS_ERROR
    99 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    100 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / domain %d =! AF_INET\n",
    101 __FUNCTION__ , process->pid , this->trdid , domain );
     102printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / domain %d =! AF_INET / cycle %d\n",
     103__FUNCTION__ , process->pid , this->trdid , domain , (uint32_t)tm_start );
    102104#endif
    103105                this->errno = EINVAL;
     
    110112
    111113#if DEBUG_SYSCALLS_ERROR
    112 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    113 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / illegal socket type\n",
    114 __FUNCTION__ , process->pid , this->trdid );
     114printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / illegal socket type / cycle %d\n",
     115__FUNCTION__ , process->pid , this->trdid  , (uint32_t)tm_start);
    115116#endif
    116117                this->errno = EINVAL;
     
    126127
    127128#if DEBUG_SYSCALLS_ERROR
    128 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    129 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / cannot create socket\n",
    130 __FUNCTION__ , process->pid , this->trdid );
     129printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / cannot create socket / cycle %d\n",
     130__FUNCTION__ , process->pid , this->trdid  , (uint32_t)tm_start);
    131131#endif
    132132                this->errno = EINVAL;
     
    148148
    149149#if DEBUG_SYSCALLS_ERROR
    150 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    151 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / socket address %x unmapped\n",
    152 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
     150printk("\n[ERROR] in %s : thread[%x,%x] / BIND / socket address %x unmapped / cycle %d\n",
     151__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
    153152#endif
    154153                this->errno = EINVAL;
     
    171170
    172171#if DEBUG_SYSCALLS_ERROR
    173 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    174 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / cannot access socket[%x,%d]\n",
    175 __FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid );
     172printk("\n[ERROR] in %s : thread[%x,%x] / BIND / cannot access socket[%x,%d] / cycle %d\n",
     173__FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid , (uint32_t)tm_start );
    176174#endif
    177175                this->errno = EINVAL;
     
    195193
    196194#if DEBUG_SYSCALLS_ERROR
    197 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    198 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d]\n",
    199 __FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid );
     195printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d] / cycle %d\n",
     196__FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid , (uint32_t)tm_start );
    200197#endif
    201198                this->errno = EINVAL;
     
    217214
    218215#if DEBUG_SYSCALLS_ERROR
    219 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    220 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped\n",
    221 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
     216printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped / cycle %d\n",
     217__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
    222218#endif
    223219                this->errno = EINVAL;
     
    239235
    240236#if DEBUG_SYSCALLS_ERROR
    241 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    242 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d]\n",
    243 __FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid );
     237printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d] / cycle %d\n",
     238__FUNCTION__ , process->pid , this->trdid ,  process->pid, fdid , (uint32_t)tm_start );
    244239#endif
    245240                this->errno = EINVAL;
     
    261256
    262257#if DEBUG_SYSCALLS_ERROR
    263 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    264 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped\n",
    265 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
     258printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped / cycle %d\n",
     259__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
    266260#endif
    267261                this->errno = EINVAL;
     
    275269                                 &k_sockaddr.sin_port );
    276270
    277             if( ret )
    278             {
    279 
    280 #if DEBUG_SYSCALLS_ERROR
    281 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    282 printk("\n[ERROR] in %s : thread[%x,%x] / ACCEPT / cannot access socket[%x,%d]\n",
    283 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid );
     271            if( ret < 0 )
     272            {
     273
     274#if DEBUG_SYSCALLS_ERROR
     275printk("\n[ERROR] in %s : thread[%x,%x] / ACCEPT / cannot access socket[%x,%d] / cycle %d\n",
     276__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start );
    284277#endif
    285278                this->errno = EINVAL;
     
    305298
    306299#if DEBUG_SYSCALLS_ERROR
    307 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    308 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / buffer %x unmapped\n",
    309 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
    310 #endif
    311                 this->errno = EINVAL;
    312                 ret = -1;
    313                 break;
    314             }
    315 
    316             // check length
    317             if( length == 0 )
    318             {
    319 
    320 #if DEBUG_SYSCALLS_ERROR
    321 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    322 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / buffer length is 0\n",
    323 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
    324 #endif
    325                 this->errno = EINVAL;
    326                 ret = -1;
    327                 break;
    328             }
    329 
    330             // cal relevant relevant socket function
    331             ret = socket_send( fdid , u_buf , length );
    332 
    333             if( ret < 0 )
    334             {
    335 
    336 #if DEBUG_SYSCALLS_ERROR
    337 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    338 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / cannot access socket[%x,%d] \n",
    339 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid );
    340 #endif
    341                 this->errno = EINVAL;
    342             }
     300printk("\n[ERROR] in %s : thread[%x,%x] / SEND / u_buf %x unmapped / cycle %d\n",
     301__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
     302#endif
     303                this->errno = EINVAL;
     304                ret = -1;
     305                break;
     306            }
     307
     308            // check length argument
     309            if( (length == 0) || (length > (1<<CONFIG_SOCK_TX_BUF_ORDER)) )
     310            {
     311
     312#if DEBUG_SYSCALLS_ERROR
     313printk("\n[ERROR] in %s : thread[%x,%x] / SEND / bad buffer length %d / cycle %d\n",
     314__FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start );
     315#endif
     316                this->errno = EINVAL;
     317                ret = -1;
     318                break;
     319            }
     320
      321            // call relevant socket function
     322            ret = socket_send( fdid,
     323                               u_buf,
     324                               length );
     325            if( ret < 0 )
     326            {
     327
     328#if DEBUG_SYSCALLS_ERROR
     329printk("\n[ERROR] in %s : thread[%x,%x] / SEND / cannot access socket[%x,%d] / cycle %d\n",
     330__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start );
     331#endif
     332                this->errno = EINVAL;
     333            }
     334
     335#if DEBUG_SYS_SOCKET
     336if( DEBUG_SYS_SOCKET < (uint32_t)tm_start )
     337{
     338    hal_copy_from_uspace( XPTR( local_cxy , &kbuf ) , u_buf , ret );
     339    printk("\n[%s] thread[%x,%x] send %d bytes <%s>\n",
     340    __FUNCTION__, process->pid, this->trdid , ret, kbuf );
     341}
     342#endif
    343343            break;
    344344        }
     
    355355
    356356#if DEBUG_SYSCALLS_ERROR
    357 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    358 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / buffer %x unmapped\n",
    359 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
    360 #endif
    361                 this->errno = EINVAL;
    362                 ret = -1;
    363                 break;
    364             }
    365 
    366             // check length
    367             if( length == 0 )
    368             {
    369 
    370 #if DEBUG_SYSCALLS_ERROR
    371 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    372 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / buffer length is 0\n",
    373 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 );
     357printk("\n[ERROR] in %s : thread[%x,%x] / RECV / u_buf %x unmapped / cycle %d\n",
     358__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
     359#endif
     360                this->errno = EINVAL;
     361                ret = -1;
     362                break;
     363            }
     364
     365            // check length argument
     366            if( (length == 0) || (length > (1<<CONFIG_SOCK_RX_BUF_ORDER)) )
     367            {
     368
     369#if DEBUG_SYSCALLS_ERROR
     370printk("\n[ERROR] in %s : thread[%x,%x] / RECV / bad buffer length %d / cycle %d\n",
     371__FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start );
    374372#endif
    375373                this->errno = EINVAL;
     
    379377
     380378            // call relevant kernel socket function
    381             ret =  socket_recv( fdid , u_buf , length );
    382 
    383             if( ret < 0 )
    384             {
    385 
    386 #if DEBUG_SYSCALLS_ERROR
    387 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    388 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / cannot access socket[%x,%d] \n",
    389 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid );
    390 #endif
    391                 this->errno = EINVAL;
    392             }
     379            ret =  socket_recv( fdid,
     380                                u_buf,
     381                                length );
     382            if( ret < 0 )
     383            {
     384
     385#if DEBUG_SYSCALLS_ERROR
     386printk("\n[ERROR] in %s : thread[%x,%x] / RECV / cannot access socket[%x,%d] / cycle %d\n",
     387__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start );
     388#endif
     389                this->errno = EINVAL;
     390            }
     391
     392#if DEBUG_SYS_SOCKET
     393if( DEBUG_SYS_SOCKET < (uint32_t)tm_start )
     394{
     395    hal_copy_from_uspace( XPTR( local_cxy , &kbuf ) , u_buf , ret );
     396    printk("\n[%s] thread[%x,%x] received %d bytes <%s>\n",
     397    __FUNCTION__, process->pid, this->trdid , ret, kbuf );
     398}
     399#endif
     400            break;
     401        }
     402        /////////////////
     403        case SOCK_SENDTO:
     404        {
     405            sockaddr_in_t k_remote_addr;
     406
     407            uint32_t      fdid          = (uint32_t)arg1 & 0x0000FFFF;
     408            uint32_t      length        = (uint32_t)arg1 >> 16;
     409            uint8_t     * u_buf         = (uint8_t *)(intptr_t)arg2;
     410            sockaddr_t  * u_remote_addr = (sockaddr_t *)(intptr_t)arg3;
     411
     412            // check u_buf mapped in user space
     413            if( vmm_get_vseg( process , (intptr_t)arg2 , &vseg ) )
     414            {
     415
     416#if DEBUG_SYSCALLS_ERROR
     417printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / u_buf %x unmapped / cycle %d\n",
     418__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
     419#endif
     420                this->errno = EINVAL;
     421                ret = -1;
     422                break;
     423            }
     424
     425            // check u_remote_addr mapped in user space
     426            if( vmm_get_vseg( process , (intptr_t)arg3 , &vseg ) )
     427            {
     428
     429#if DEBUG_SYSCALLS_ERROR
     430printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / u_remote_addr %x unmapped / cycle %d\n",
     431__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg3 , (uint32_t)tm_start );
     432#endif
     433                this->errno = EINVAL;
     434                ret = -1;
     435                break;
     436            }
     437
     438            // check length argument
     439            if( (length == 0) || (length > (1<<CONFIG_SOCK_TX_BUF_ORDER)) )
     440            {
     441
     442#if DEBUG_SYSCALLS_ERROR
     443printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / bad length %d / cycle %d\n",
     444__FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start );
     445#endif
     446                this->errno = EINVAL;
     447                ret = -1;
     448                break;
     449            }
     450
     451            // make a kernel copy of the sockaddr_t structure
     452            hal_copy_from_uspace( XPTR( local_cxy , &k_remote_addr ),
     453                                  u_remote_addr, sizeof(sockaddr_t) );
     454
      455            // call relevant socket function
     456            ret = socket_sendto( fdid,
     457                                 u_buf,
     458                                 length,
     459                                 k_remote_addr.sin_addr,
     460                                 k_remote_addr.sin_port );
     461            if( ret < 0 )
     462            {
     463
     464#if DEBUG_SYSCALLS_ERROR
     465printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / cannot access socket[%x,%d] / cycle %d\n",
     466__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start );
     467#endif
     468                this->errno = EINVAL;
     469            }
     470
     471            break;
     472        }
     473        ///////////////////
     474        case SOCK_RECVFROM:
     475        {
     476            sockaddr_in_t k_remote_addr;
     477
     478            uint32_t      fdid          = (uint32_t)arg1 & 0x0000FFFF;
     479            uint32_t      length        = (uint32_t)arg1 >> 16;
     480            uint8_t     * u_buf         = (uint8_t *)(intptr_t)arg2;
     481            sockaddr_t  * u_remote_addr = (sockaddr_t *)(intptr_t)arg3;
     482
     483            // check buffer is mapped in user space
     484            if( vmm_get_vseg( process , (intptr_t)arg2 , &vseg ) )
     485            {
     486
     487#if DEBUG_SYSCALLS_ERROR
     488printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / u_buf %x unmapped / cycle %d\n",
     489__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start );
     490#endif
     491                this->errno = EINVAL;
     492                ret = -1;
     493                break;
     494            }
     495
     496            // check u_remote_addr mapped in user space
     497            if( vmm_get_vseg( process , (intptr_t)arg3 , &vseg ) )
     498            {
     499
     500#if DEBUG_SYSCALLS_ERROR
     501printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / u_remote_addr %x unmapped / cycle %d\n",
     502__FUNCTION__ , process->pid , this->trdid , (intptr_t)arg3 , (uint32_t)tm_start );
     503#endif
     504                this->errno = EINVAL;
     505                ret = -1;
     506                break;
     507            }
     508
     509            // check length argument
     510            if( (length == 0) || (length > (1<<CONFIG_SOCK_RX_BUF_ORDER)) )
     511            {
     512
     513#if DEBUG_SYSCALLS_ERROR
     514printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / bad length %d / cycle %d\n",
     515__FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start );
     516#endif
     517                this->errno = EINVAL;
     518                ret = -1;
     519                break;
     520            }
     521
     522            // make a kernel copy of the sockaddr_t structure
     523            hal_copy_from_uspace( XPTR( local_cxy , &k_remote_addr ),
     524                                  u_remote_addr, sizeof(sockaddr_t) );
     525
      526            // call relevant socket function
     527            ret = socket_recvfrom( fdid,
     528                                   u_buf,
     529                                   length,
     530                                   k_remote_addr.sin_addr,
     531                                   k_remote_addr.sin_port );
     532            if( ret < 0 )
     533            {
     534
     535#if DEBUG_SYSCALLS_ERROR
     536printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / cannot access socket[%x,%d] / cycle %d\n",
     537__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start );
     538#endif
     539                this->errno = EINVAL;
     540            }
     541
    393542            break;
    394543        }
     
    398547
    399548#if DEBUG_SYSCALLS_ERROR
    400 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
    401 printk("\n[ERROR] in %s : thread[%x,%x] / undefined socket operation %d\n",
    402 __FUNCTION__ , process->pid , this->trdid , cmd );
     549printk("\n[ERROR] in %s : thread[%x,%x] / undefined socket operation %d / cycle %d\n",
     550__FUNCTION__ , process->pid , this->trdid , cmd , (uint32_t)tm_start );
    403551#endif
    404552            this->errno = EINVAL;
     
    413561
    414562#if DEBUG_SYS_SOCKET
    415 if( DEBUG_SYS_SOCKET < tm_end )
    416 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    417 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end );
     563printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n",
     564__FUNCTION__, process->pid, this->trdid, socket_user_cmd_str(cmd), (uint32_t)tm_end );
    418565#endif
    419566
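
The SEND / RECV / SENDTO / RECVFROM cases above all decode their first argument the same way: the socket identifier travels in the 16 low-order bits of arg1 and the transfer length in the 16 high-order bits, arg2 carries the user buffer pointer, and arg3 (for SENDTO / RECVFROM) a pointer on a user sockaddr_t. The sketch below shows the matching user-side packing. It is illustrative only: the hal_user_syscall() trap wrapper, the SYS_SOCKET number and the SOCK_SEND value are placeholders, not names taken from this changeset; only the argument layout mirrors the kernel-side decoding shown above.

    #include <stdint.h>

    /* placeholder declarations : the real trap wrapper and mnemonic values
       live in the user-level libraries, not in this changeset              */
    extern int hal_user_syscall( int service, int a0, int a1, int a2, int a3 );
    #define SYS_SOCKET   0   /* placeholder syscall number     */
    #define SOCK_SEND    0   /* placeholder operation mnemonic */

    static inline int user_sock_send( uint32_t fdid, const void * buf, uint32_t length )
    {
        /* the length is limited to 16 bits and to the kernel TX buffer size,
           so larger messages must be fragmented by the caller               */
        uint32_t arg1 = (length << 16) | (fdid & 0x0000FFFF);

        return hal_user_syscall( SYS_SOCKET,          /* service         */
                                 SOCK_SEND,           /* cmd    -> arg0  */
                                 (int)arg1,           /* fdid + length   */
                                 (int)(intptr_t)buf,  /* buffer -> arg2  */
                                 0 );                 /* unused for SEND */
    }
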
  • trunk/kernel/syscalls/sys_thread_exit.c

    r670 r683  
    22 * sys_thread_exit.c - terminates the execution of calling thread
    33 *
    4  * Authors   Alain Greiner (2016,2017,2018,2019)
     4 * Authors   Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    4444    pid_t       pid       = process->pid;
    4545   
     46#if DEBUG_SYS_THREAD_EXIT || DEBUG_SYSCALLS_ERROR
     47uint64_t     tm_start = hal_get_cycles();
     48#endif
     49
    4650    // check exit_value pointer in user space if required
    4751    if( exit_status != NULL )
     
    5357
    5458#if DEBUG_SYSCALLS_ERROR
    55 printk("\n[ERROR] in %s : exit_status buffer %x unmapped / thread[%x,%x]\n",
    56 __FUNCTION__, (intptr_t)exit_status, process->pid, this->trdid );
     59if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     60printk("\n[WARNING] in %s : exit_status buffer %x unmapped / thread[%x,%x]\n",
     61__FUNCTION__, (intptr_t)exit_status, pid, trdid );
    5762#endif
    5863            this->errno = EINVAL;
     
    6772
    6873#if DEBUG_SYSCALLS_ERROR
    69 printk("\n[ERROR] in %s : busylocks count = %d  / thread[%x,%x]\n",
    70 __FUNCTION__ , count, process->pid, this->trdid );
     74if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     75printk("\n[WARNING] in %s : busylocks count = %d  / thread[%x,%x]\n",
     76__FUNCTION__ , count, pid, trdid );
    7177#endif
    7278            this->errno = EINVAL;
     
    8490
    8591#if DEBUG_SYS_THREAD_EXIT
    86 uint64_t     tm_start = hal_get_cycles();
    87 if( DEBUG_SYS_THREAD_EXIT < tm_start )
     92if( DEBUG_SYS_THREAD_EXIT < (uint32_t)tm_start )
    8893printk("\n[%s] thread[%x,%x] is main => delete process / cycle %d\n",
    8994__FUNCTION__ , pid , trdid , (uint32_t)tm_start );
     
    96101
    97102#if DEBUG_SYS_THREAD_EXIT
    98 uint64_t     tm_start = hal_get_cycles();
    99 if( DEBUG_SYS_THREAD_EXIT < tm_start )
     103if( DEBUG_SYS_THREAD_EXIT < (uint32_t)tm_start )
    100104printk("\n[%s] thread[%x,%x] is not main => delete thread / cycle %d\n",
    101105__FUNCTION__ , pid , trdid , (uint32_t)tm_start );
  • trunk/kernel/syscalls/sys_thread_sleep.c

    r566 r683  
    11/*
    2  * sys_thread_sleep.c - put the calling thread in sleep state
      2 * sys_thread_sleep.c - blocks the calling thread on SLEEP, with or without alarm
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner    (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2828#include <syscalls.h>
    2929
    30 //////////////////////
    31 int sys_thread_sleep( void )
     30///////////////////////////////////////////////////////////////////////////////////////
      31// This static function implements the alarm handler used to wake up a thread
      32// when the alarm rings after a sleep( seconds ) syscall.
     33///////////////////////////////////////////////////////////////////////////////////////
     34// @ thread_xp  : extended pointer on blocked thread.
     35///////////////////////////////////////////////////////////////////////////////////////
     36static void __attribute__((noinline)) sleep_alarm_handler( xptr_t thread_xp )
    3237{
     38    // stop the alarm
     39    alarm_stop( thread_xp );
    3340
    34     thread_t * this = CURRENT_THREAD;
     41    // unblock the thread
     42    thread_unblock( thread_xp , THREAD_BLOCKED_SLEEP );
     43
     44}  // end sleep_alarm_handler()
     45
     46////////////////////////////////////////
     47int sys_thread_sleep( uint32_t seconds )
     48{
     49    cycle_t    ncycles;    // number of cycles to sleep
     50
     51    thread_t  * this      = CURRENT_THREAD;
     52    xptr_t      thread_xp = XPTR( local_cxy , this );
     53
     54    cycle_t     tm_start = hal_get_cycles();
    3555
    3656#if DEBUG_SYS_THREAD_SLEEP
    37 uint64_t     tm_start;
    38 uint64_t     tm_end;
    39 tm_start = hal_get_cycles();
    40 if( DEBUG_SYS_THREAD_SLEEP < tm_start )
    41 printk("\n[DBG] %s : thread %x n process %x blocked / cycle %d\n",
    42 __FUNCTION__ , this->trdid, this->process->pid , (uint32_t)tm_start );
     57if( DEBUG_SYS_THREAD_SLEEP < (uint32_t)tm_start )
     58printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     59__FUNCTION__, this->process->pid, this->trdid, (uint32_t)tm_start );
    4360#endif
    4461
    45     thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
    46     sched_yield("blocked on sleep");
     62    if( seconds == 0 )   // sleep without alarm
     63    {
     64 
     65#if DEBUG_SYS_THREAD_SLEEP
     66if( DEBUG_SYS_THREAD_SLEEP < tm_start )
     67printk("\n[%s] thread[%x,%x] blocks on <SLEEP> without alarm / cycle %d\n",
     68__FUNCTION__ , this->process->pid, this->trdid, (uint32_t)tm_start );
     69#endif
      70        // thread blocks and deschedules
     71        thread_block( thread_xp , THREAD_BLOCKED_SLEEP );
     72        sched_yield("sleep without alarm");
     73    }
     74    else                // sleep with alarm
     75    {
     76        // translate seconds to ncycles
     77        ncycles = seconds * LOCAL_CLUSTER->sys_clk;
     78
     79        // register & start the calling thread alarm
     80        alarm_start( thread_xp,
     81                     tm_start + ncycles,
     82                     &sleep_alarm_handler,
     83                     thread_xp );
     84 
     85#if DEBUG_SYS_THREAD_SLEEP
     86if( DEBUG_SYS_THREAD_SLEEP < tm_start )
      87printk("\n[%s] thread[%x,%x] blocks on <SLEEP> for %d seconds / cycle %d\n",
     88__FUNCTION__ , this->process->pid,  this->trdid, seconds, (uint32_t)tm_start );
     89#endif
     90        // thread blocks & deschedules
     91        thread_block( thread_xp , THREAD_BLOCKED_SLEEP );
     92        sched_yield("sleep with alarm");
     93    }
    4794
    4895#if DEBUG_SYS_THREAD_SLEEP
    49 tm_end = hal_get_cycles();
    5096if( DEBUG_SYS_THREAD_SLEEP < tm_end )
    51 printk("\n[DBG] %s : thread %x in process %x resume / cycle %d\n",
    52 __FUNCTION__ , this->trdid, this->process->pid , (uint32_t)tm_end );
     97printk("\n[%s] thread[%x,%x] resume / cycle %d\n",
     98__FUNCTION__ , this->process->pid, this->trdid, (uint32_t)tm_end );
    5399#endif
    54100
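
The new sys_thread_sleep() above combines four primitives that appear elsewhere in this changeset: alarm_start() / alarm_stop() to arm and disarm a dated wake-up, thread_block() / thread_unblock() on the THREAD_BLOCKED_SLEEP condition, and sched_yield() to deschedule. The following sketch extracts that bounded-wait pattern as a stand-alone helper; the helper and its name are illustrative, not part of the changeset, and only the calls listed above are taken from it.

    /* illustrative helper only : same pattern as sys_thread_sleep() above */
    static void timeout_handler( xptr_t thread_xp )
    {
        alarm_stop( thread_xp );                              /* de-register the alarm    */
        thread_unblock( thread_xp , THREAD_BLOCKED_SLEEP );   /* make the thread runnable */
    }

    static void block_with_timeout( uint32_t seconds )
    {
        thread_t * this      = CURRENT_THREAD;
        xptr_t     thread_xp = XPTR( local_cxy , this );

        if( seconds )   /* bounded wait : arm a dated alarm before blocking */
        {
            cycle_t date = hal_get_cycles() + (cycle_t)seconds * LOCAL_CLUSTER->sys_clk;
            alarm_start( thread_xp , date , &timeout_handler , thread_xp );
        }

        thread_block( thread_xp , THREAD_BLOCKED_SLEEP );     /* mark the thread blocked */
        sched_yield( "blocked with timeout" );                /* deschedule              */
    }
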
  • trunk/kernel/syscalls/sys_thread_wakeup.c

    r637 r683  
    11/*
    2  * sys_thread_wakeup.c - wakeup indicated thread
      2 * sys_thread_wakeup.c - unblocks the indicated thread from the SLEEP condition
    33 *
    4  * Author    Alain Greiner (2016,2017,2018,2019)
     4 * Author    Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2727#include <process.h>
    2828#include <errno.h>
    29 
    3029#include <syscalls.h>
    3130
     
    3635    process_t * process = this->process;
    3736
    38 #if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS)
    39 uint64_t     tm_start = hal_get_cycles();
      37#if DEBUG_SYS_THREAD_WAKEUP || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS
     38cycle_t      tm_start = hal_get_cycles();
    4039#endif
    4140
    4241#if DEBUG_SYS_THREAD_WAKEUP
    43 if( DEBUG_SYS_THREAD_WAKEUP < tm_start )
    44 printk("\n[%s] thread %x in process enter to activate thread %x / cycle %d\n",
    45 __FUNCTION__, this->trdid, process->pid, trdid, (uint32_t)tm_start );
     42if( DEBUG_SYS_THREAD_WAKEUP < (uint32_t)tm_start )
     43printk("\n[%s] thread[%x,%x] enter to activate thread %x / cycle %d\n",
     44__FUNCTION__, process->pid, this->trdid, trdid, (uint32_t)tm_start );
    4645#endif
    4746
     
    5655
    5756#if DEBUG_SYSCALLS_ERROR
    58 printk("\n[ERROR] in %s : thread %x in process %x / illegal trdid argument %x\n",
    59 __FUNCTION__, this->trdid, process->pid, trdid );
     57if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     58printk("\n[ERROR] in %s : thread[%x,%x] / illegal trdid argument %x\n",
     59__FUNCTION__, process->pid, this->trdid, trdid );
    6060#endif
    6161                this->errno = EINVAL;
     
    7070
    7171#if DEBUG_SYSCALLS_ERROR
    72 printk("\n[ERROR] in %s : thread %x in process %x cannot find thread %x/n",
    73 __FUNCTION__ , this->trdid, process->pid, trdid );
     72if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     73printk("\n[ERROR] in %s : thread[%x,%x] / cannot find thread[%x,%x]\n",
     74__FUNCTION__, process->pid, this->trdid, process->pid, trdid );
    7475#endif
    7576        CURRENT_THREAD->errno = EINVAL;
     
    7778    }
    7879
     80    // get target thread cluster and local pointer
     81    thread_t * tgt_ptr = GET_PTR( thread_xp );
     82    cxy_t      tgt_cxy = GET_CXY( thread_xp );
     83
     84    // get state of the target thread alarm
     85    bool_t linked = hal_remote_l32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) );
     86
     87    // delete the alarm if active
     88    if( linked ) alarm_stop( thread_xp );
     89
    7990    // unblock target thread
    80     thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL );
     91    thread_unblock( thread_xp , THREAD_BLOCKED_SLEEP );
    8192
    8293#if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS)
     
    8798#if DEBUG_SYS_THREAD_WAKEUP
    8899if( DEBUG_SYS_THREAD_WAKEUP < tm_end )
    89 printk("\n[%s] thread %x in process %x exit / thread %x activated / cycle %d\n",
    90 __FUNCTION__ , this->trdid, process->pid, trdid, (uint32_t)tm_end );
     100printk("\n[%s] thread[%x,%x] exit / thread[%x,%x] activated / cycle %d\n",
     101__FUNCTION__ , process->pid, this->trdid, process->pid, trdid, (uint32_t)tm_end );
    91102#endif
    92103
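
Before unblocking the target thread, the new code above reads the <alarm.linked> field of a thread descriptor that may live in another cluster: the xptr_t is split into a cluster identifier and a local pointer, and hal_remote_l32() performs the remote 32-bit read. A minimal sketch of that access pattern follows; only the helper name is invented, the GET_CXY / GET_PTR / XPTR / hal_remote_l32 calls are the ones used in the changeset.

    /* illustrative helper : cross-cluster read of a thread descriptor field */
    static bool_t remote_alarm_is_linked( xptr_t thread_xp )
    {
        cxy_t      tgt_cxy = GET_CXY( thread_xp );   /* cluster of the target thread      */
        thread_t * tgt_ptr = GET_PTR( thread_xp );   /* local pointer inside that cluster */

        /* remote read of the <alarm.linked> field */
        return (bool_t)hal_remote_l32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) );
    }
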
  • trunk/kernel/syscalls/sys_timeofday.c

    r637 r683  
    22 * sys_timeofday.c - Get current time
    33 *
    4  * Author    Alain Greiner (2016,2017,2018,2019)
     4 * Author    Alain Greiner      (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3232#include <core.h>
    3333#include <shared_syscalls.h>
    34 
    3534#include <syscalls.h>
    3635
     
    5049        process_t *    process = this->process;
    5150
    52 #if (DEBUG_SYS_TIMEOFDAY || CONFIG_INSTRUMENTATION_SYSCALLS)
     51#if DEBUG_SYS_TIMEOFDAY || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS
    5352uint64_t     tm_start = hal_get_cycles();
    5453#endif
    5554
    5655#if DEBUG_SYS_TIMEOFDAY
    57 if( DEBUG_SYS_TIMEOFDAY < tm_start )
     56if( DEBUG_SYS_TIMEOFDAY < (uint32_t)tm_start )
    5857printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    5958__FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start );
     
    6564
    6665#if DEBUG_SYSCALLS_ERROR
    67 printk("\n[ERROR] in %s for thread %x in process %x : tz argument must be NULL\n",
     66if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     67printk("\n[ERROR] in %s : thread[%x,%x] / tz argument must be NULL\n",
    6868__FUNCTION__ , this->trdid , process->pid );
    6969#endif
     
    7979
    8080#if DEBUG_SYSCALLS_ERROR
    81 printk("\n[ERROR] in %s : user buffer tz unmapped / thread %x / process %x\n",
    82 __FUNCTION__ , (intptr_t)tz , this->trdid , process->pid );
     81if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
     82printk("\n[ERROR] in %s : thread[%x,%x] / user buffer tv unmapped\n",
      83__FUNCTION__ , process->pid , this->trdid );
    8384#endif
    8485        this->errno = EINVAL;
  • trunk/kernel/syscalls/syscalls.h

    r670 r683  
    3838
    3939/******************************************************************************************
    40  * This function forces the calling thread to sleep, for a fixed number of cycles.
    41  ******************************************************************************************
    42  * cycles   : number of cycles.
    43  *****************************************************************************************/
    44 int sys_alarm( uint32_t cycles );
      40 * This function forces the calling thread to sleep for a fixed number of seconds.
     41 ******************************************************************************************
     42 * @ seconds   : number of seconds.
     43 *****************************************************************************************/
     44int sys_alarm( uint32_t seconds );
    4545
    4646/******************************************************************************************
     
    229229
    230230/******************************************************************************************
      231 * This generic function implements all the non-standard syscalls of type "get_xxx()",
      232 * defined in the <almosmkh.h> and <almosmkh.c> files.
      233 * The operation type mnemonics are defined in the <shared_almos.h> file.
      234 * This function checks the syscall arguments, and calls the relevant kernel function.
     235 ******************************************************************************************
     236 * @ arg0       : operation type (mnemonics defined in shared_get.h)
     237 * @ arg1       : depends on operation type
     238 * @ arg2       : depends on operation type
     239 * @ arg3       : depends on operation type
     240 * @ return 0 if success / return -1 if illegal argument.
     241 *****************************************************************************************/
     242int sys_get( reg_t   arg0,
     243             reg_t   arg1,
     244             reg_t   arg2,
     245             reg_t   arg3 );
     246
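
The sys_get() entry point declared above follows the dispatch structure used by the other generic syscalls in this changeset: arg0 selects the operation, the remaining arguments are checked (typically with vmm_get_vseg() for user pointers) before the relevant kernel service is called. The sketch below illustrates that structure only; the GET_SOMETHING mnemonic and the do_get_something() service are placeholders, and just the prototype and the checking idiom come from the changeset.

    #define GET_SOMETHING  0   /* placeholder operation type */

    /* illustrative dispatcher skeleton, not the actual sys_get() body */
    int sys_get_sketch( reg_t arg0 , reg_t arg1 , reg_t arg2 , reg_t arg3 )
    {
        thread_t  * this    = CURRENT_THREAD;
        process_t * process = this->process;
        vseg_t    * vseg;

        switch( arg0 )
        {
            case GET_SOMETHING:   /* arg1 = user pointer on the result buffer */
            {
                /* check the user buffer is mapped before using it */
                if( vmm_get_vseg( process , (intptr_t)arg1 , &vseg ) )
                {
                    this->errno = EINVAL;
                    return -1;
                }
                return do_get_something( (void *)(intptr_t)arg1 );  /* placeholder service */
            }
            default:              /* unknown operation type */
            {
                this->errno = EINVAL;
                return -1;
            }
        }
    }
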
     247/******************************************************************************************
    231248 * This function implements the non-standard "get_best_core" syscall.
    232249 * It selects, in a macro-cluster specified by the <base_cxy> and <level> arguments,
     
    470487 * as a remote_buffer_t, creates two (read and write) file descriptors, and links these
    471488 * two file descriptors to the pipe.
    472  * TODO : the dynamic memory allocation in case of buffer full is not implemented.
     489 * TODO  : the dynamic memory allocation in case of buffer full is not implemented.
      490 * FIXME : which syscall releases the kernel memory allocated by this syscall ?
    473491 ******************************************************************************************
    474492 * @ fd   : pointeur on a 2 slots array of fdid : fd[0] read / fd[1] write.
     
    566584
    567585/******************************************************************************************
    568  * This generic function implements the socket related syscalls.
      586 * This generic function implements all socket-related syscalls.
    569587 * The operation types mnemonics are defined in the <shared_socket> file.
    570588 * The supported operations are defined in the <socket.h> & <socket.c> files.
     
    686704
    687705/******************************************************************************************
    688  * This function block the calling thread on the THREAD_BLOCKED_GLOBAL condition,
    689  * and deschedule.
    690  ******************************************************************************************
    691  * @ return 0 if success / returns -1 if failure.
    692  *****************************************************************************************/
    693 int sys_thread_sleep( void );
     706 * This function blocks the calling thread on the THREAD_BLOCKED_SLEEP condition,
     707 * and deschedules. When the <seconds> argument is non-zero, this argument defines
     708 * the sleeping time. When it is zero, the sleeping time is unbounded, and the thread
      709 * must be unblocked by the sys_thread_wakeup() function.
     710 ******************************************************************************************
     711 * @ seconds : number of seconds of sleep / No alarm is activated when 0.
     712 * @ return 0 if success / returns -1 if failure.
     713 *****************************************************************************************/
     714int sys_thread_sleep( uint32_t seconds );
    694715
    695716/******************************************************************************************
     696717 * This function unblocks the thread identified by its <trdid> from the
    697  * THREAD_BLOCKED_GLOBAL condition.
      718 * THREAD_BLOCKED_SLEEP condition, and cancels the registered alarm if required.
    698719 ******************************************************************************************
    699720 * @ trdid  : target thread identifier.
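
The two prototypes above are designed to work as a pair: a thread that calls sys_thread_sleep(0) blocks for an unbounded time and can only be resumed by another thread invoking sys_thread_wakeup() with its trdid. The sketch below shows that pairing; it calls the kernel prototypes directly for clarity (real user code goes through the corresponding library wrappers), the trdid_t parameter type is assumed from the description, and the work-queue helpers are placeholders.

    /* illustrative pairing of the two syscalls ; helpers are placeholders */
    void consumer( void )
    {
        while( work_queue_is_empty() )        /* placeholder predicate                */
        {
            sys_thread_sleep( 0 );            /* unbounded sleep : no alarm is armed,
                                                 only a wakeup can resume this thread */
        }
        handle_work();                        /* placeholder                          */
    }

    void producer( trdid_t consumer_trdid )
    {
        post_work();                          /* placeholder                          */
        sys_thread_wakeup( consumer_trdid );  /* cancels a pending alarm if any, then
                                                 unblocks the consumer from SLEEP     */
    }

As with any sleep / wakeup pairing, the emptiness check and the call to sys_thread_sleep() must be synchronized by the application, otherwise a wakeup issued between the two can be missed.
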
     
    705726 * This function calls the scheduler for the core running the calling thread.
    706727 ******************************************************************************************
    707  * @ x_size   : [out] number of clusters in a row.
    708  * @ y_size   : [out] number of clusters in a column.
    709  * @ ncores   : [out] number of cores per cluster.
    710728 * @ return always 0.
    711729 *****************************************************************************************/