Changeset 438


Ignore:
Timestamp:
Apr 4, 2018, 2:49:02 PM (4 years ago)
Author:
alain
Message:

Fix a bug in scheduler related to RPC blocking.

Location:
trunk
Files:
64 edited

Legend:

Unmodified
Added
Removed
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r437 r438  
    201201    }
    202202
    203 #if CONFIG_DEBUG_HAL_EXCEPTIONS
     203#if DEBUG_HAL_EXCEPTIONS
    204204uint32_t cycle = (uint32_t)hal_get_cycles();
    205 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
     205if( DEBUG_HAL_EXCEPTIONS < cycle )
    206206printk("\n[DBG] %s : thread %x enter / is_ins %d / %s / vaddr %x / cycle %d\n",
    207207__FUNCTION__, this, is_ins, hal_mmu_exception_str(excp_code), bad_vaddr, cycle );
     
    229229            {
    230230
    231 #if CONFIG_DEBUG_HAL_EXCEPTIONS
     231#if DEBUG_HAL_EXCEPTIONS
    232232cycle = (uint32_t)hal_get_cycles();
    233 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
     233if( DEBUG_HAL_EXCEPTIONS < cycle )
    234234printk("\n[DBG] %s : thread %x exit / page-fault handled for vaddr = %x\n",
    235235__FUNCTION__ , this , bad_vaddr );
     
    268268                {
    269269
    270 #if CONFIG_DEBUG_HAL_EXCEPTIONS
     270#if DEBUG_HAL_EXCEPTIONS
    271271cycle = (uint32_t)hal_get_cycles();
    272 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
     272if( DEBUG_HAL_EXCEPTIONS < cycle )
    273273printk("\n[DBG] %s : thread %x exit / copy-on-write handled for vaddr = %x\n",
    274274__FUNCTION__ , this , bad_vaddr );
     
    390390    excPC          = uzone[UZ_EPC];
    391391
    392 #if CONFIG_DEBUG_HAL_EXCEPTIONS
     392#if DEBUG_HAL_EXCEPTIONS
    393393uint32_t cycle = (uint32_t)hal_get_cycles();
    394 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
     394if( DEBUG_HAL_EXCEPTIONS < cycle )
    395395printk("\n[DBG] %s : thread %x enter / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n",
    396396__FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle );
     
    450450    }
    451451
    452 #if CONFIG_DEBUG_HAL_EXCEPTIONS
     452#if DEBUG_HAL_EXCEPTIONS
    453453cycle = (uint32_t)hal_get_cycles();
    454 if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
     454if( DEBUG_HAL_EXCEPTIONS < cycle )
    455455printk("\n[DBG] %s : thread %x exit / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n",
    456456__FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle );
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r432 r438  
    132132    xptr_t     page_xp;
    133133
    134 #if CONFIG_DEBUG_GPT_ACCESS
     134#if DEBUG_GPT_ACCESS
    135135uint32_t cycle = (uint32_t)hal_get_cycles;
    136 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     136if( DEBUG_GPT_ACCESS < cycle )
    137137printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    138138__FUNCTION__, CURRENT_THREAD, cycle );
     
    161161        gpt->ppn  = ppm_page2ppn( page_xp );
    162162
    163 #if CONFIG_DEBUG_GPT_ACCESS
     163#if DEBUG_GPT_ACCESS
    164164cycle = (uint32_t)hal_get_cycles;
    165 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     165if( DEBUG_GPT_ACCESS < cycle )
    166166printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    167167__FUNCTION__, CURRENT_THREAD, cycle );
     
    340340    uint32_t            tsar_attr;           // PTE attributes for TSAR MMU
    341341
    342 #if CONFIG_DEBUG_GPT_ACCESS
     342#if DEBUG_GPT_ACCESS
    343343uint32_t cycle = (uint32_t)hal_get_cycles;
    344 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     344if( DEBUG_GPT_ACCESS < cycle )
    345345printk("\n[DBG] %s : thread %x enter / vpn %x / attr %x / ppn %x / cycle %d\n",
    346346__FUNCTION__, CURRENT_THREAD, vpn, attr, ppn, cycle );
     
    357357    tsar_attr = gpt2tsar( attr );
    358358
    359 #if (CONFIG_DEBUG_GPT_ACCESS & 1)
    360 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     359#if (DEBUG_GPT_ACCESS & 1)
     360if( DEBUG_GPT_ACCESS < cycle )
    361361printk("\n[DBG] %s : thread %x / vpn %x / &pt1 %x / tsar_attr %x\n",
    362362__FUNCTION__, CURRENT_THREAD, vpn, pt1, tsar_attr );
     
    392392        pte1 = *pte1_ptr;
    393393       
    394 #if (CONFIG_DEBUG_GPT_ACCESS & 1)
    395 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     394#if (DEBUG_GPT_ACCESS & 1)
     395if( DEBUG_GPT_ACCESS < cycle )
    396396printk("\n[DBG] %s : thread %x / vpn %x / current_pte1 %x\n",
    397397__FUNCTION__, CURRENT_THREAD, vpn, pte1 );
     
    437437            pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
    438438
    439 #if (CONFIG_DEBUG_GPT_ACCESS & 1)
    440 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     439#if (DEBUG_GPT_ACCESS & 1)
     440if( DEBUG_GPT_ACCESS < cycle )
    441441printk("\n[DBG] %s : thread %x / vpn %x / pte1 %x / &pt2 %x\n",
    442442__FUNCTION__, CURRENT_THREAD, vpn, pte1, pt2 );
     
    452452        hal_fence();
    453453
    454 #if CONFIG_DEBUG_GPT_ACCESS
     454#if DEBUG_GPT_ACCESS
    455455cycle = (uint32_t)hal_get_cycles;
    456 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     456if( DEBUG_GPT_ACCESS < cycle )
    457457printk("\n[DBG] %s : thread %x exit / vpn %x / pte2_attr %x / pte2_ppn %x / cycle %d\n",
    458458__FUNCTION__, CURRENT_THREAD, vpn, pt2[2 * ix2], pt2[2 * ix2 + 1], cycle );
     
    762762    ppn_t        dst_pt2_ppn;
    763763
    764 #if CONFIG_DEBUG_GPT_ACCESS
     764#if DEBUG_GPT_ACCESS
    765765uint32_t cycle = (uint32_t)hal_get_cycles;
    766 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     766if( DEBUG_GPT_ACCESS < cycle )
    767767printk("\n[DBG] %s : thread %x enter / vpn %x / cycle %d\n",
    768768__FUNCTION__, CURRENT_THREAD, vpn, cycle );
     
    853853            *ppn    = src_pte2_ppn;
    854854       
    855 #if CONFIG_DEBUG_GPT_ACCESS
     855#if DEBUG_GPT_ACCESS
    856856cycle = (uint32_t)hal_get_cycles;
    857 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     857if( DEBUG_GPT_ACCESS < cycle )
    858858printk("\n[DBG] %s : thread %x exit / copy done for vpn %x / cycle %d\n",
    859859__FUNCTION__, CURRENT_THREAD, vpn, cycle );
     
    870870    *ppn    = 0;
    871871   
    872 #if CONFIG_DEBUG_GPT_ACCESS
     872#if DEBUG_GPT_ACCESS
    873873cycle = (uint32_t)hal_get_cycles;
    874 if( CONFIG_DEBUG_GPT_ACCESS < cycle )
     874if( DEBUG_GPT_ACCESS < cycle )
    875875printk("\n[DBG] %s : thread %x exit / nothing done for vpn %x / cycle %d\n",
    876876__FUNCTION__, CURRENT_THREAD, vpn, cycle );
  • trunk/hal/tsar_mips32/core/hal_kentry.S

    r432 r438  
    200200        mtc0    $3,         $12                         # set new c0_sr
    201201
    202 #--------------------------
    203 #if CONFIG_DEBUG_HAL_KENTRY
     202#--------------------
     203#if DEBUG_HAL_KENTRY
    204204
    205205    # display "enter" message
     
    349349    sw      $5,    8($4)                # current uzone pointer <= previous
    350350
    351 #--------------------------
    352 #if CONFIG_DEBUG_HAL_KENTRY
     351#-------------------
     352#if DEBUG_HAL_KENTRY
    353353
    354354    # display "exit" message
  • trunk/hal/tsar_mips32/drivers/soclib_bdv.c

    r437 r438  
    7575    ioc_xp   = (xptr_t)hal_remote_lwd( XPTR( th_cxy , &th_ptr->ioc_cmd.dev_xp ) );
    7676
    77 #if CONFIG_DEBUG_HAL_IOC_RX
    78 uint32_t cycle = (uint32_t)hal_get_cycles();
    79 if( (CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != IOC_WRITE ) )
     77#if DEBUG_HAL_IOC_RX
     78uint32_t cycle = (uint32_t)hal_get_cycles();
     79if( (DEBUG_HAL_IOC_RX < cycle) && (cmd_type != IOC_WRITE ) )
    8080printk("\n[DBG] %s : thread %x enter for RX / cycle %d\n",
    8181__FUNCTION__ , CURRENT_THREAD , cycle );
    8282#endif
    8383
    84 #if CONFIG_DEBUG_HAL_IOC_TX
    85 uint32_t cycle = (uint32_t)hal_get_cycles();
    86 if( (CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == IOC_WRITE) )
     84#if DEBUG_HAL_IOC_TX
     85uint32_t cycle = (uint32_t)hal_get_cycles();
     86if( (DEBUG_HAL_IOC_TX < cycle) && (cmd_type == IOC_WRITE) )
    8787printk("\n[DBG] %s : thread %x enter for TX / cycle %d\n",
    8888__FUNCTION__ , CURRENT_THREAD , cycle );
     
    152152    }
    153153   
    154 #if CONFIG_DEBUG_HAL_IOC_RX
     154#if DEBUG_HAL_IOC_RX
    155155cycle = (uint32_t)hal_get_cycles();
    156 if( (CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != TXT_WRITE) )
     156if( (DEBUG_HAL_IOC_RX < cycle) && (cmd_type != TXT_WRITE) )
    157157printk("\n[DBG] %s : thread %x exit after RX / cycle %d\n",
    158158__FUNCTION__ , CURRENT_THREAD , cycle );
    159159#endif
    160160
    161 #if CONFIG_DEBUG_HAL_IOC_TX
     161#if DEBUG_HAL_IOC_TX
    162162cycle = (uint32_t)hal_get_cycles();
    163 if( (CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == TXT_WRITE) )
     163if( (DEBUG_HAL_IOC_TX < cycle) && (cmd_type == TXT_WRITE) )
    164164printk("\n[DBG] %s : thread %x exit after TX / cycle %d\n",
    165165__FUNCTION__ , CURRENT_THREAD , cycle );
     
    199199            error = (status != BDV_READ_SUCCESS);
    200200
    201 #if CONFIG_DEBUG_HAL_IOC_RX
    202 uint32_t cycle = (uint32_t)hal_get_cycles();
    203 if( CONFIG_DEBUG_HAL_IOC_RX < cycle )
     201#if DEBUG_HAL_IOC_RX
     202uint32_t cycle = (uint32_t)hal_get_cycles();
     203if( DEBUG_HAL_IOC_RX < cycle )
    204204printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n",
    205205__FUNCTION__, client_ptr , chdev->server , cycle );
     
    211211            error = (status != BDV_WRITE_SUCCESS);
    212212
    213 #if CONFIG_DEBUG_HAL_IOC_TX
    214 uint32_t cycle = (uint32_t)hal_get_cycles();
    215 if( CONFIG_DEBUG_HAL_IOC_TX < cycle )
     213#if DEBUG_HAL_IOC_TX
     214uint32_t cycle = (uint32_t)hal_get_cycles();
     215if( DEBUG_HAL_IOC_TX < cycle )
    216216printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n",
    217217__FUNCTION__, client_ptr , chdev->server , cycle );
  • trunk/hal/tsar_mips32/drivers/soclib_pic.c

    r435 r438  
    130130                           &pti_status );
    131131
    132 #if CONFIG_DEBUG_HAL_IRQS
     132#if DEBUG_HAL_IRQS
    133133uint32_t cycle = (uint32_t)hal_get_cycles();
    134 if (CONFIG_DEBUG_HAL_IRQS < cycle )
     134if (DEBUG_HAL_IRQS < cycle )
    135135printk("\n[DBG] %s : core[%x,%d] enter / WTI = %x / HWI = %x / PTI = %x / cycle %d\n",
    136136__FUNCTION__ , local_cxy , core->lid , wti_status , hwi_status , pti_status, cycle );
     
    143143        index = wti_status - 1;
    144144
     145        ////////////////////////////////////////////////////////
    145146        if( index < LOCAL_CLUSTER->cores_nr )   // it is an IPI
    146147        {
    147148            assert( (index == core->lid) , __FUNCTION__ , "illegal IPI index" );
    148149
    149 #if CONFIG_DEBUG_HAL_IRQS
    150 if (CONFIG_DEBUG_HAL_IRQS < cycle )
     150#if DEBUG_HAL_IRQS
     151if (DEBUG_HAL_IRQS < cycle )
    151152printk("\n[DBG] %s : core[%x,%d] received an IPI\n", __FUNCTION__ , local_cxy , core->lid );
    152153#endif
    153             // acknowledge WTI (this require an XCU read)
     154            // acknowledge IRQ (this requires an XCU read)
    154155            uint32_t   ack  = xcu_base[(XCU_WTI_REG << 5) | core->lid];
    155            
     156
    156157            // check RPC FIFO,  and activate or create a RPC thread
    157158            // condition is always true, but we must use the ack value
    158159            if( ack + 1 ) rpc_check();
    159160        }
    160         else                                    // it is an external device
     161        ////////////////////////////////////////////////////////////////
     162        else                                    // it is an external IRQ
    161163        {
    162164            // get pointer on source chdev
     
    171173
    172174                // disable WTI in local XCU controller
    173                 uint32_t * base = soclib_pic_xcu_base();
    174                 base[(XCU_MSK_WTI_DISABLE << 5) | core->lid] = 1 << core->lid;
     175                xcu_base[(XCU_MSK_WTI_DISABLE << 5) | core->lid] = 1 << core->lid;
     176
     177                hal_fence();
    175178            }
    176179            else                                 // call relevant ISR
    177180            {
    178181
    179 #if CONFIG_DEBUG_HAL_IRQS
    180 if (CONFIG_DEBUG_HAL_IRQS < cycle )
     182#if DEBUG_HAL_IRQS
     183if (DEBUG_HAL_IRQS < cycle )
    181184printk("\n[DBG] %s : core[%x,%d] received external WTI %d\n",
    182185__FUNCTION__ , local_cxy , core->lid , index );
     
    188191        }
    189192
    190         if( hwi_status )      // pending HWI
     193    /////////////////////////////////////////////////////////////
     194        if( hwi_status )                     // It is an Internal IRQ
    191195        {
    192196        index = hwi_status - 1;
     
    204208            // disable HWI in local XCU controller
    205209            xcu_base[(XCU_MSK_HWI_DISABLE << 5) | core->lid] = 1 << core->lid;
     210
     211            hal_fence();
    206212                }
    207213        else                    // call relevant ISR
    208214        {
    209215
    210 #if CONFIG_DEBUG_HAL_IRQS
    211 if (CONFIG_DEBUG_HAL_IRQS < cycle )
     216#if DEBUG_HAL_IRQS
     217if (DEBUG_HAL_IRQS < cycle )
    212218printk("\n[DBG] %s : core[%x,%d] received HWI %d\n",
    213219__FUNCTION__ , local_cxy , core->lid , index );
     
    217223        }
    218224        }
    219 
    220     if( pti_status )      // pending PTI
     225    ///////////////////////////////////////////////////////
     226    if( pti_status )                   // It is a Timer IRQ
    221227        {
    222228        index = pti_status - 1;
     
    224230        assert( (index == core->lid) , __FUNCTION__ , "unconsistent PTI index\n");
    225231
    226 #if CONFIG_DEBUG_HAL_IRQS
    227 if (CONFIG_DEBUG_HAL_IRQS < cycle )
     232#if DEBUG_HAL_IRQS
     233if (DEBUG_HAL_IRQS < cycle )
    228234printk("\n[DBG] %s : core[%x,%d] received PTI %d\n",
    229235__FUNCTION__ , core->lid , local_cxy , index );
    230236#endif
    231         // acknowledge PTI (this require a read access to XCU)
     237        // acknowledge IRQ (this requires a read access to XCU)
    232238        uint32_t   ack  = xcu_base[(XCU_PTI_ACK << 5) | core->lid];
    233239
     
    359365{
    360366
    361 #if CONFIG_DEBUG_HAL_IRQS
     367#if DEBUG_HAL_IRQS
    362368uint32_t cycle = (uint32_t)hal_get_cycles();
    363 if( CONFIG_DEBUG_HAL_IRQS < cycle )
     369if( DEBUG_HAL_IRQS < cycle )
    364370printk("\n[DBG] %s : thread %x enter for core[%x,%d] / cycle %d\n",
    365371__FUNCTION__ , CURRENT_THREAD , local_cxy , lid , cycle );
     
    422428        ((soclib_pic_core_t *)core->pic_extend)->wti_vector[wti_id] = src_chdev;
    423429
    424 #if CONFIG_DEBUG_HAL_IRQS
    425 if( CONFIG_DEBUG_HAL_IRQS < cycle )
     430#if DEBUG_HAL_IRQS
     431if( DEBUG_HAL_IRQS < cycle )
    426432printk("\n[DBG] %s : %s / channel = %d / rx = %d / hwi_id = %d / wti_id = %d / cluster = %x\n",
    427433__FUNCTION__ , chdev_func_str( func ) , channel , is_rx , hwi_id , wti_id , local_cxy );
     
    444450        ((soclib_pic_core_t *)core->pic_extend)->wti_vector[hwi_id] = src_chdev;
    445451
    446 #if CONFIG_DEBUG_HAL_IRQS
    447 if( CONFIG_DEBUG_HAL_IRQS < cycle )
     452#if DEBUG_HAL_IRQS
     453if( DEBUG_HAL_IRQS < cycle )
    448454printk("\n[DBG] %s : %s / channel = %d / hwi_id = %d / cluster = %x\n",
    449455__FUNCTION__ , chdev_func_str( func ) , channel , hwi_id , local_cxy );
  • trunk/hal/tsar_mips32/drivers/soclib_tty.c

    r436 r438  
    3030#include <hal_special.h>
    3131
    32 #if (CONFIG_DEBUG_SYS_READ & 1)
     32#if (DEBUG_SYS_READ & 1)
    3333extern uint32_t  enter_tty_cmd_read;
    3434extern uint32_t  exit_tty_cmd_read;
     
    3838#endif
    3939
    40 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     40#if (DEBUG_SYS_WRITE & 1)
    4141extern uint32_t  enter_tty_cmd_write;
    4242extern uint32_t  exit_tty_cmd_write;
     
    115115    xptr_t   error_xp = XPTR( th_cxy , &th_ptr->txt_cmd.error );
    116116
    117 #if (CONFIG_DEBUG_SYS_READ & 1)
     117#if (DEBUG_SYS_READ & 1)
    118118if( type == TXT_READ) enter_tty_cmd_read = (uint32_t)hal_get_cycles();
    119119#endif
    120120
    121 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     121#if (DEBUG_SYS_WRITE & 1)
    122122if( type == TXT_WRITE) enter_tty_cmd_write = (uint32_t)hal_get_cycles();
    123123#endif
    124124
    125 #if CONFIG_DEBUG_HAL_TXT_RX
     125#if DEBUG_HAL_TXT_RX
    126126uint32_t cycle = (uint32_t)hal_get_cycles();
    127 if( (CONFIG_DEBUG_HAL_TXT_RX < cycle) && (type == TXT_READ) )
     127if( (DEBUG_HAL_TXT_RX < cycle) && (type == TXT_READ) )
    128128printk("\n[DBG] %s : thread %x enter for RX / cycle %d\n",
    129129__FUNCTION__ , CURRENT_THREAD , cycle );
    130130#endif
    131131
    132 #if CONFIG_DEBUG_HAL_TXT_TX
     132#if DEBUG_HAL_TXT_TX
    133133uint32_t cycle = (uint32_t)hal_get_cycles();
    134 if( (CONFIG_DEBUG_HAL_TXT_TX < cycle) && (type == TXT_WRITE) )
     134if( (DEBUG_HAL_TXT_TX < cycle) && (type == TXT_WRITE) )
    135135printk("\n[DBG] %s : thread %x enter for TX / cycle %d\n",
    136136__FUNCTION__ , CURRENT_THREAD , cycle );
     
    238238    }
    239239
    240 #if CONFIG_DEBUG_HAL_TXT_RX
     240#if DEBUG_HAL_TXT_RX
    241241cycle = (uint32_t)hal_get_cycles();
    242 if( (CONFIG_DEBUG_HAL_TXT_RX < cycle) && (type == TXT_READ) )
     242if( (DEBUG_HAL_TXT_RX < cycle) && (type == TXT_READ) )
    243243printk("\n[DBG] %s : thread %x exit after RX / cycle %d\n",
    244244__FUNCTION__ , CURRENT_THREAD , cycle );
    245245#endif
    246246
    247 #if CONFIG_DEBUG_HAL_TXT_TX
     247#if DEBUG_HAL_TXT_TX
    248248cycle = (uint32_t)hal_get_cycles();
    249 if( (CONFIG_DEBUG_HAL_TXT_TX < cycle) && (type == TXT_WRITE) )
     249if( (DEBUG_HAL_TXT_TX < cycle) && (type == TXT_WRITE) )
    250250printk("\n[DBG] %s : thread %x exit after TX / cycle %d\n",
    251251__FUNCTION__ , CURRENT_THREAD , cycle );
    252252#endif
    253253
    254 #if (CONFIG_DEBUG_SYS_READ & 1)
     254#if (DEBUG_SYS_READ & 1)
    255255if( type == TXT_READ ) exit_tty_cmd_read = (uint32_t)hal_get_cycles();
    256256#endif
    257257
    258 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     258#if (DEBUG_SYS_WRITE & 1)
    259259if( type == TXT_WRITE ) exit_tty_cmd_write = (uint32_t)hal_get_cycles();
    260260#endif
     
    288288    server_lid = server->core->lid;
    289289
    290 #if (CONFIG_DEBUG_SYS_READ & 1)
     290#if (DEBUG_SYS_READ & 1)
    291291if( is_rx ) enter_tty_isr_read = (uint32_t)hal_get_cycles();
    292292#endif
    293293
    294 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     294#if (DEBUG_SYS_WRITE & 1)
    295295if( is_rx == 0 ) enter_tty_isr_write = (uint32_t)hal_get_cycles();
    296296#endif
    297297
    298 #if CONFIG_DEBUG_HAL_TXT_RX
     298#if DEBUG_HAL_TXT_RX
    299299uint32_t cycle = (uint32_t)hal_get_cycles();
    300 if( (CONFIG_DEBUG_HAL_TXT_RX < cycle) && is_rx )
     300if( (DEBUG_HAL_TXT_RX < cycle) && is_rx )
    301301printk("\n[DBG] %s : enter for RX / cycle %d\n", __FUNCTION__ , cycle );
    302302#endif
    303303
    304 #if CONFIG_DEBUG_HAL_TXT_TX
     304#if DEBUG_HAL_TXT_TX
    305305uint32_t cycle = (uint32_t)hal_get_cycles();
    306 if( (CONFIG_DEBUG_HAL_TXT_TX < cycle) && (is_rx == 0) )
     306if( (DEBUG_HAL_TXT_TX < cycle) && (is_rx == 0) )
    307307printk("\n[DBG] %s : enter for TX / cycle %d\n", __FUNCTION__ , cycle );
    308308#endif
     
    459459    hal_fence();
    460460
    461 #if CONFIG_DEBUG_HAL_TXT_RX
     461#if DEBUG_HAL_TXT_RX
    462462cycle = (uint32_t)hal_get_cycles();
    463 if( (CONFIG_DEBUG_HAL_TXT_RX < cycle) && is_rx )
     463if( (DEBUG_HAL_TXT_RX < cycle) && is_rx )
    464464printk("\n[DBG] %s : exit after RX / cycle %d\n", __FUNCTION__, cycle );
    465465#endif
    466466
    467 #if CONFIG_DEBUG_HAL_TXT_TX
     467#if DEBUG_HAL_TXT_TX
    468468cycle = (uint32_t)hal_get_cycles();
    469 if( (CONFIG_DEBUG_HAL_TXT_TX < cycle) && (is_rx == 0) )
     469if( (DEBUG_HAL_TXT_TX < cycle) && (is_rx == 0) )
    470470printk("\n[DBG] %s : exit after TX / cycle %d\n", __FUNCTION__, cycle );
    471471#endif
    472472
    473 #if (CONFIG_DEBUG_SYS_READ & 1)
     473#if (DEBUG_SYS_READ & 1)
    474474if( is_rx ) exit_tty_isr_read = (uint32_t)hal_get_cycles();
    475475#endif
    476476
    477 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     477#if (DEBUG_SYS_WRITE & 1)
    478478if( is_rx == 0 ) exit_tty_isr_write = (uint32_t)hal_get_cycles();
    479479#endif
  • trunk/kernel/devices/dev_fbf.c

    r437 r438  
    177177{
    178178
    179 #if CONFIG_DEBUG_DEV_FBF_RX
     179#if DEBUG_DEV_FBF_RX
    180180uint32_t cycle = (uint32_t)hal_get_cycle();
    181 if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
     181if( DEBUG_DEV_FBF_RX < cycle )
    182182printk("\n[DBG] %s : thread %x enter / process %x / vaddr %x / size %x\n",
    183183__FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
     
    186186    return dev_fbf_access( false , buffer , length , offset );
    187187
    188 #if CONFIG_DEBUG_DEV_FBF_RX
     188#if DEBUG_DEV_FBF_RX
    189189cycle = (uint32_t)hal_get_cycle();
    190 if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
     190if( DEBUG_DEV_FBF_RX < cycle )
    191191printk("\n[DBG] %s : thread %x exit / process %x / vaddr %x / size %x\n",
    192192__FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
     
    201201{
    202202
    203 #if CONFIG_DEBUG_DEV_FBF_TX
     203#if DEBUG_DEV_FBF_TX
    204204uint32_t cycle = (uint32_t)hal_get_cycle();
    205 if( CONFIG_DEBUG_DEV_FBF_TX < cycle )
     205if( DEBUG_DEV_FBF_TX < cycle )
    206206printk("\n[DBG] %s : thread %x enter / process %x / vaddr %x / size %x\n",
    207207__FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
     
    210210    return dev_fbf_access( true , buffer , length , offset );
    211211
    212 #if CONFIG_DEBUG_DEV_FBF_RX
     212#if DEBUG_DEV_FBF_RX
    213213cycle = (uint32_t)hal_get_cycle();
    214 if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
     214if( DEBUG_DEV_FBF_RX < cycle )
    215215printk("\n[DBG] %s : thread %x exit / process %x / vaddr %x / size %x\n",
    216216__FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
  • trunk/kernel/devices/dev_ioc.c

    r437 r438  
    136136{
    137137
    138 #if CONFIG_DEBUG_DEV_IOC_RX
     138#if DEBUG_DEV_IOC_RX
    139139uint32_t cycle = (uint32_t)hal_get_cycles();
    140 if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
     140if( DEBUG_DEV_IOC_RX < cycle )
    141141printk("\n[DBG] %s : thread %x enters / lba  %x / buffer %x / cycle %d\n",
    142142__FUNCTION__ , this, lba, buffer, cycle );
     
    145145    return dev_ioc_access( IOC_READ , buffer , lba , count );
    146146
    147 #if CONFIG_DEBUG_DEV_IOC_RX
     147#if DEBUG_DEV_IOC_RX
    148148cycle = (uint32_t)hal_get_cycles();
    149 if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
     149if( DEBUG_DEV_IOC_RX < cycle )
    150150printk("\n[DBG] %s : thread %x exit / lba  %x / buffer %x / cycle %d\n",
    151151__FUNCTION__ , this, lba, buffer, cycle );
     
    160160{
    161161
    162 #if CONFIG_DEBUG_DEV_IOC_TX
     162#if DEBUG_DEV_IOC_TX
    163163uint32_t cycle = (uint32_t)hal_get_cycles();
    164 if( CONFIG_DEBUG_DEV_IOC_TX < cycle )
     164if( DEBUG_DEV_IOC_TX < cycle )
    165165printk("\n[DBG] %s : thread %x enters / lba  %x / buffer %x / cycle %d\n",
    166166__FUNCTION__ , this, lba, buffer, cycle );
     
    169169    return dev_ioc_access( IOC_WRITE , buffer , lba , count );
    170170
    171 #if CONFIG_DEBUG_DEV_IOC_TX
     171#if DEBUG_DEV_IOC_TX
    172172cycle = (uint32_t)hal_get_cycles();
    173 if( CONFIG_DEBUG_DEV_IOC_TX < cycle )
     173if( DEBUG_DEV_IOC_TX < cycle )
    174174printk("\n[DBG] %s : thread %x exit / lba  %x / buffer %x / cycle %d\n",
    175175__FUNCTION__ , this, lba, buffer, cycle );
     
    186186    thread_t * this = CURRENT_THREAD;
    187187
    188 #if CONFIG_DEBUG_DEV_IOC_RX
     188#if DEBUG_DEV_IOC_RX
    189189uint32_t cycle = (uint32_t)hal_get_cycles();
    190 if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
     190if( DEBUG_DEV_IOC_RX < cycle )
    191191printk("\n[DBG] %s : thread %x enters / lba  %x / buffer %x / cycle %d\n",
    192192__FUNCTION__ , this, lba, buffer, cycle );
     
    227227    dev_pic_enable_irq( lid , ioc_xp );
    228228
    229 #if CONFIG_DEBUG_DEV_IOC_RX
     229#if DEBUG_DEV_IOC_RX
    230230cycle = (uint32_t)hal_get_cycles();
    231 if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
     231if( DEBUG_DEV_IOC_RX < cycle )
    232232printk("\n[DBG] %s : thread %x exit / lba  %x / buffer %x / cycle %d\n",
    233233__FUNCTION__ , this, lba, buffer, cycle );
  • trunk/kernel/devices/dev_mmc.c

    r437 r438  
    9999    thread_t * this = CURRENT_THREAD;
    100100
    101 #if CONFIG_DEBUG_DEV_MMC
     101#if DEBUG_DEV_MMC
    102102uint32_t cycle = (uint32_t)hal_get_cycles();
    103 if( CONFIG_DEBUG_DEV_MMC < cycle )
     103if( DEBUG_DEV_MMC < cycle )
    104104printk("\n[DBG] %s : thread %x enters / process %x / buf_xp = %l\n",
    105105__FUNCTION__, this, this->process->pid , buf_xp );
     
    128128    error = dev_mmc_access( this );
    129129
    130 #if CONFIG_DEBUG_DEV_MMC
     130#if DEBUG_DEV_MMC
    131131cycle = (uint32_t)hal_get_cycles();
    132 if( CONFIG_DEBUG_DEV_MMC < cycle )
     132if( DEBUG_DEV_MMC < cycle )
    133133printk("\n[DBG] %s : thread %x exit / process %x / buf_xp = %l\n",
    134134__FUNCTION__, this, this->process->pid , buf_xp );
     
    147147    thread_t * this = CURRENT_THREAD;
    148148
    149 #if CONFIG_DEBUG_DEV_MMC
     149#if DEBUG_DEV_MMC
    150150uint32_t cycle = (uint32_t)hal_get_cycles();
    151 if( CONFIG_DEBUG_DEV_MMC < cycle )
     151if( DEBUG_DEV_MMC < cycle )
    152152printk("\n[DBG] %s : thread %x enters / process %x / buf_xp = %l\n",
    153153__FUNCTION__, this, this->process->pid , buf_xp );
     
    176176    error = dev_mmc_access( this );
    177177
    178 #if CONFIG_DEBUG_DEV_MMC
     178#if DEBUG_DEV_MMC
    179179cycle = (uint32_t)hal_get_cycles();
    180 if( CONFIG_DEBUG_DEV_MMC < cycle )
     180if( DEBUG_DEV_MMC < cycle )
    181181printk("\n[DBG] %s : thread %x exit / process %x / buf_xp = %l\n",
    182182__FUNCTION__, this, this->process->pid , buf_xp );
  • trunk/kernel/devices/dev_nic.c

    r437 r438  
    9999    core_t * core = thread_ptr->core;
    100100
    101 #if CONFIG_DEBUG_DEV_NIC_RX
     101#if DEBUG_DEV_NIC_RX
    102102uint32_t cycle = (uint32_t)hal_get_cycles();
    103 if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
     103if( DEBUG_DEV_NIC_RX < cycle )
    104104printk("\n[DBG] %s : thread %x enters for packet %x in cluster %x\n",
    105105__FUNCTION__ , thread_ptr , pkd , local_cxy );
     
    153153    pkd->length = thread_ptr->nic_cmd.length;
    154154
    155 #if CONFIG_DEBUG_DEV_NIC_RX
     155#if DEBUG_DEV_NIC_RX
    156156cycle = (uint32_t)hal_get_cycles();
    157 if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
     157if( DEBUG_DEV_NIC_RX < cycle )
    158158printk("\n[DBG] %s : thread %x exit for packet %x in cluster %x\n",
    159159__FUNCTION__ , thread_ptr , pkd , local_cxy );
     
    177177    core_t * core = thread_ptr->core;
    178178
    179 #if CONFIG_DEBUG_DEV_NIC_RX
     179#if DEBUG_DEV_NIC_RX
    180180uint32_t cycle = (uint32_t)hal_get_cycles();
    181 if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
     181if( DEBUG_DEV_NIC_RX < cycle )
    182182printk("\n[DBG] %s : thread %x enters for packet %x in cluster %x\n",
    183183__FUNCTION__ , thread_ptr , pkd , local_cxy );
     
    229229    if( error ) return error;
    230230
    231 #if CONFIG_DEBUG_DEV_NIC_RX
     231#if DEBUG_DEV_NIC_RX
    232232cycle = (uint32_t)hal_get_cycles();
    233 if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
     233if( DEBUG_DEV_NIC_RX < cycle )
    234234printk("\n[DBG] %s : thread %x exit for packet %x in cluster %x\n",
    235235__FUNCTION__ , thread_ptr , pkd , local_cxy );
  • trunk/kernel/devices/dev_pic.c

    r437 r438  
    8686{
    8787
    88 #if CONFIG_DEBUG_DEV_PIC
    89 uint32_t cycle = (uint32_t)hal_get_cycles();
    90 if( CONFIG_DEBUG_DEV_PIC < cycle )
     88#if DEBUG_DEV_PIC
     89uint32_t cycle = (uint32_t)hal_get_cycles();
     90if( DEBUG_DEV_PIC < cycle )
    9191printk("\n[DBG] %s : core[%x,%d] / src_chdev_cxy %x / src_chdev_ptr %x / cycle %d\n",
    9292__FUNCTION__, local_cxy, lid, GET_CXY(src_chdev_xp), GET_PTR(src_chdev_xp), cycle );
     
    109109{
    110110
    111 #if CONFIG_DEBUG_DEV_PIC
    112 uint32_t cycle = (uint32_t)hal_get_cycles();
    113 if( CONFIG_DEBUG_DEV_PIC < cycle )
     111#if DEBUG_DEV_PIC
     112uint32_t cycle = (uint32_t)hal_get_cycles();
     113if( DEBUG_DEV_PIC < cycle )
    114114printk("\n[DBG] %s : core[%x,%d] / src_chdev_cxy %x / src_chdev_ptr %x / cycle %d\n",
    115115__FUNCTION__, local_cxy, lid, GET_CXY(src_chdev_xp), GET_PTR(src_chdev_xp), cycle );
     
    131131{
    132132
    133 #if CONFIG_DEBUG_DEV_PIC
    134 uint32_t cycle = (uint32_t)hal_get_cycles();
    135 if( CONFIG_DEBUG_DEV_PIC < cycle )
     133#if DEBUG_DEV_PIC
     134uint32_t cycle = (uint32_t)hal_get_cycles();
     135if( DEBUG_DEV_PIC < cycle )
    136136printk("\n[DBG] %s : core[%x,%d] / period %d / cycle %d\n",
    137137__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , period, cycle );
     
    153153{
    154154
    155 #if CONFIG_DEBUG_DEV_PIC
    156 uint32_t cycle = (uint32_t)hal_get_cycles();
    157 if( CONFIG_DEBUG_DEV_PIC < cycle )
     155#if DEBUG_DEV_PIC
     156uint32_t cycle = (uint32_t)hal_get_cycles();
     157if( DEBUG_DEV_PIC < cycle )
    158158printk("\n[DBG] %s : core[%x,%d] / cycle %d\n",
    159159__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , cycle );
     
    176176{
    177177
    178 #if CONFIG_DEBUG_DEV_PIC
    179 uint32_t cycle = (uint32_t)hal_get_cycles();
    180 if( CONFIG_DEBUG_DEV_PIC < cycle )
     178#if DEBUG_DEV_PIC
     179uint32_t cycle = (uint32_t)hal_get_cycles();
     180if( DEBUG_DEV_PIC < cycle )
    181181printk("\n[DBG] %s : src_core[%x,%d] / dst_core[%x,%d] / cycle %d\n",
    182182__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, cycle );
     
    198198{
    199199
    200 #if CONFIG_DEBUG_DEV_PIC
    201 uint32_t cycle = (uint32_t)hal_get_cycles();
    202 if( CONFIG_DEBUG_DEV_PIC < cycle )
     200#if DEBUG_DEV_PIC
     201uint32_t cycle = (uint32_t)hal_get_cycles();
     202if( DEBUG_DEV_PIC < cycle )
    203203printk("\n[DBG] %s : core[%x,%d] / cycle %d\n",
    204204__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
  • trunk/kernel/devices/dev_txt.c

    r436 r438  
    3838extern chdev_directory_t  chdev_dir;         // allocated in kernel_init.c
    3939
    40 #if (CONFIG_DEBUG_SYS_READ & 1)
     40#if (DEBUG_SYS_READ & 1)
    4141extern uint32_t enter_txt_read;
    4242extern uint32_t exit_txt_read;
    4343#endif
    4444
    45 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     45#if (DEBUG_SYS_WRITE & 1)
    4646extern uint32_t enter_txt_write;
    4747extern uint32_t exit_txt_write;
     
    161161{
    162162
    163 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     163#if (DEBUG_SYS_WRITE & 1)
    164164enter_txt_write = hal_time_stamp();
    165165#endif
    166166
    167 #if CONFIG_DEBUG_DEV_TXT_TX
     167#if DEBUG_DEV_TXT_TX
    168168uint32_t cycle = (uint32_t)hal_get_cycles();
    169 if( CONFIG_DEBUG_DEV_TXT_TX < cycle )
     169if( DEBUG_DEV_TXT_TX < cycle )
    170170printk("\n[DBG] %s : thread %x enters / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    171171#endif
     
    173173    return dev_txt_access( TXT_WRITE , channel , buffer , count );
    174174
    175 #if CONFIG_DEBUG_DEV_TXT_TX
     175#if DEBUG_DEV_TXT_TX
    176176cycle = (uint32_t)hal_get_cycles();
    177 if( CONFIG_DEBUG_DEV_TXT_TX < cycle )
     177if( DEBUG_DEV_TXT_TX < cycle )
    178178printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    179179#endif
    180180
    181 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     181#if (DEBUG_SYS_WRITE & 1)
    182182exit_txt_write = hal_time_stamp();
    183183#endif
     
    190190{
    191191
    192 #if (CONFIG_DEBUG_SYS_READ & 1)
     192#if (DEBUG_SYS_READ & 1)
    193193enter_txt_read = hal_time_stamp();
    194194#endif
    195195
    196 #if CONFIG_DEBUG_DEV_TXT_RX
     196#if DEBUG_DEV_TXT_RX
    197197uint32_t cycle = (uint32_t)hal_get_cycles();
    198 if( CONFIG_DEBUG_DEV_TXT_RX < cycle )
     198if( DEBUG_DEV_TXT_RX < cycle )
    199199printk("\n[DBG] %s : thread %x enters / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    200200#endif
     
    202202    return dev_txt_access( TXT_READ , channel , buffer , 1 );
    203203
    204 #if CONFIG_DEBUG_DEV_TXT_RX
     204#if DEBUG_DEV_TXT_RX
    205205cycle = (uint32_t)hal_get_cycles();
    206 if( CONFIG_DEBUG_DEV_TXT_RX < cycle )
     206if( DEBUG_DEV_TXT_RX < cycle )
    207207printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    208208#endif
    209209
    210 #if (CONFIG_DEBUG_SYS_READ & 1)
     210#if (DEBUG_SYS_READ & 1)
    211211exit_txt_read = hal_time_stamp();
    212212#endif
  • trunk/kernel/fs/devfs.c

    r437 r438  
    4242extern chdev_directory_t    chdev_dir;      // allocated in kernel_init.c
    4343
    44 #if (CONFIG_DEBUG_SYS_READ & 1)
     44#if (DEBUG_SYS_READ & 1)
    4545extern uint32_t  enter_devfs_read;
    4646extern uint32_t  exit_devfs_read;
    4747#endif
    4848
    49 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     49#if (DEBUG_SYS_WRITE & 1)
    5050extern uint32_t  enter_devfs_write;
    5151extern uint32_t  exit_devfs_write;
     
    9292    error_t  error;
    9393
    94 #if CONFIG_DEBUG_DEVFS_INIT
     94#if DEBUG_DEVFS_INIT
    9595uint32_t cycle = (uint32_t)hal_get_cycles();
    96 if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     96if( DEBUG_DEVFS_INIT < cycle )
    9797printk("\n[DBG] %s : thread %x enter at cycle %d\n",
    9898__FUNCTION__ , CURRENT_THREAD , cycle );
     
    110110    assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );
    111111
    112 #if( CONFIG_DEBUG_DEVFS_INIT & 1 )
    113 if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     112#if( DEBUG_DEVFS_INIT & 1 )
     113if( DEBUG_DEVFS_INIT < cycle )
    114114printk("\n[DBG] %s : created <dev> inode at cycle %d\n", __FUNCTION__, cycle );
    115115#endif
     
    126126    assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );
    127127
    128 #if CONFIG_DEBUG_DEVFS_INIT
     128#if DEBUG_DEVFS_INIT
    129129cycle = (uint32_t)hal_get_cycles();
    130 if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     130if( DEBUG_DEVFS_INIT < cycle )
    131131printk("\n[DBG] %s : thread %x exit at cycle %d\n",
    132132__FUNCTION__ , CURRENT_THREAD , cycle );
     
    147147    uint32_t      channel;
    148148
    149 #if CONFIG_DEBUG_DEVFS_INIT
     149#if DEBUG_DEVFS_INIT
    150150uint32_t cycle = (uint32_t)hal_get_cycles();
    151 if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     151if( DEBUG_DEVFS_INIT < cycle )
    152152printk("\n[DBG] %s : thread %x enter at cycle %d\n",
    153153__FUNCTION__ , CURRENT_THREAD , cycle );
     
    357357    }
    358358
    359 #if CONFIG_DEBUG_DEVFS_INIT
     359#if DEBUG_DEVFS_INIT
    360360cycle = (uint32_t)hal_get_cycles();
    361 if( CONFIG_DEBUG_DEVFS_INIT < cycle )
     361if( DEBUG_DEVFS_INIT < cycle )
    362362printk("\n[DBG] %s : thread %x exit at cycle %d\n",
    363363__FUNCTION__ , CURRENT_THREAD , cycle );
     
    385385    char               k_buf[CONFIG_TXT_KBUF_SIZE];  // local kernel buffer
    386386
    387 #if (CONFIG_DEBUG_SYS_READ & 1)
     387#if (DEBUG_SYS_READ & 1)
    388388enter_devfs_read = hal_time_stamp();
    389389#endif
    390390
    391 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     391#if (DEBUG_SYS_WRITE & 1)
    392392enter_devfs_write = hal_time_stamp();
    393393#endif
    394394
    395 #if CONFIG_DEBUG_DEVFS_MOVE
     395#if DEBUG_DEVFS_MOVE
    396396uint32_t cycle = (uint32_t)hal_get_cycles();
    397 if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     397if( DEBUG_DEVFS_MOVE < cycle )
    398398printk("\n[DBG] %s : thread %x enter / to_mem %d / cycle %d\n",
    399399__FUNCTION__ , CURRENT_THREAD , to_buffer , cycle );
     
    431431             }
    432432
    433 #if CONFIG_DEBUG_DEVFS_MOVE
     433#if DEBUG_DEVFS_MOVE
    434434cycle = (uint32_t)hal_get_cycles();
    435 if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     435if( DEBUG_DEVFS_MOVE < cycle )
    436436printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
    437437__FUNCTION__ , CURRENT_THREAD , to_buffer / cycle );
    438438#endif
    439439
    440 #if (CONFIG_DEBUG_SYS_READ & 1)
     440#if (DEBUG_SYS_READ & 1)
    441441exit_devfs_read = hal_time_stamp();
    442442#endif
     
    455455            {
    456456
    457 #if CONFIG_DEBUG_DEVFS_MOVE
     457#if DEBUG_DEVFS_MOVE
    458458cycle = (uint32_t)hal_get_cycles();
    459 if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
     459if( DEBUG_DEVFS_MOVE < cycle )
    460460printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
    461461__FUNCTION__ , CURRENT_THREAD , to_buffer / cycle );
    462462#endif
    463463
    464 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     464#if (DEBUG_SYS_WRITE & 1)
    465465exit_devfs_write = hal_time_stamp();
    466466#endif
  • trunk/kernel/fs/fatfs.c

    r435 r438  
    262262    "no FAT access required for first page\n");
    263263
    264 #if CONFIG_DEBUG_FATFS_GET_CLUSTER
     264#if DEBUG_FATFS_GET_CLUSTER
    265265uint32_t cycle = (uint32_t)hal_get_cycles();
    266 if( CONFIG_DEBUG_FATFS_GET_CLUSTER < cycle )
     266if( DEBUG_FATFS_GET_CLUSTER < cycle )
    267267printk("\n[DBG] %s : thread %x enter / first_cluster_id %d / searched_index / cycle %d\n",
    268268__FUNCTION__, CURRENT_THREAD, first_cluster_id, searched_page_index, cycle );
     
    293293        next_cluster_id = current_page_buffer[current_page_offset];
    294294
    295 #if (CONFIG_DEBUG_FATFS_GET_CLUSTER & 1)
    296 if( CONFIG_DEBUG_FATFS_GET_CLUSTER < cycle )
     295#if (DEBUG_FATFS_GET_CLUSTER & 1)
     296if( DEBUG_FATFS_GET_CLUSTER < cycle )
    297297printk("\n[DBG] %s : traverse FAT / current_page_index = %d\n"
    298298"current_page_offset = %d / next_cluster_id = %d\n",
     
    308308    if( next_cluster_id == 0xFFFFFFFF ) return EIO;
    309309   
    310 #if CONFIG_DEBUG_FATFS_GET_CLUSTER
     310#if DEBUG_FATFS_GET_CLUSTER
    311311cycle = (uint32_t)hal_get_cycles();
    312 if( CONFIG_DEBUG_FATFS_GET_CLUSTER < cycle )
     312if( DEBUG_FATFS_GET_CLUSTER < cycle )
    313313printk("\n[DBG] %s : thread %x exit / searched_cluster_id = %d / cycle %d\n",
    314314__FUNCTION__, CURRENT_THREAD, next_cluster_id / cycle );
     
    345345    uint8_t     * buffer;
    346346
    347 #if CONFIG_DEBUG_FATFS_INIT
     347#if DEBUG_FATFS_INIT
    348348uint32_t cycle = (uint32_t)hal_get_cycles();
    349 if( CONFIG_DEBUG_FATFS_INIT < cycle )
     349if( DEBUG_FATFS_INIT < cycle )
    350350printk("\n[DBG] %s : thread %x enter for fatfs_ctx = %x / cycle %d\n",
    351351__FUNCTION__ , CURRENT_THREAD , fatfs_ctx , cycle );
     
    370370    "cannot access boot record\n" );
    371371
    372 #if (CONFIG_DEBUG_FATFS_INIT & 0x1)
    373 if( CONFIG_DEBUG_FATFS_INIT < cycle )
     372#if (DEBUG_FATFS_INIT & 0x1)
     373if( DEBUG_FATFS_INIT < cycle )
    374374{
    375375    uint32_t   line;
     
    448448    fatfs_ctx->fat_mapper_xp         = XPTR( local_cxy , fat_mapper );
    449449
    450 #if CONFIG_DEBUG_FATFS_INIT
     450#if DEBUG_FATFS_INIT
    451451cycle = (uint32_t)hal_get_cycles();
    452 if( CONFIG_DEBUG_FATFS_INIT < cycle )
     452if( DEBUG_FATFS_INIT < cycle )
    453453printk("\n[DBG] %s : thread %x exit for fatfs_ctx = %x / cycle %d\n",
    454454__FUNCTION__ , CURRENT_THREAD , fatfs_ctx , cycle );
     
    486486    inode = mapper->inode;
    487487
    488 #if CONFIG_DEBUG_FATFS_MOVE
     488#if DEBUG_FATFS_MOVE
    489489uint32_t cycle = (uint32_t)hal_get_cycles();
    490 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     490if( DEBUG_FATFS_MOVE < cycle )
    491491printk("\n[DBG] %s : thread %x enter / page %d / inode %x / mapper %x / cycle %d\n",
    492492__FUNCTION__ , CURRENT_THREAD , index , inode , mapper , cycle );
     
    507507        lba = fatfs_ctx->fat_begin_lba + (count * index);
    508508 
    509 #if (CONFIG_DEBUG_FATFS_MOVE & 0x1)
    510 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     509#if (DEBUG_FATFS_MOVE & 0x1)
     510if( DEBUG_FATFS_MOVE < cycle )
    511511printk("\n[DBG] %s : access FAT on device / lba = %d\n", __FUNCTION__ , lba );
    512512#endif
     
    541541            {
    542542
    543 #if (CONFIG_DEBUG_FATFS_MOVE & 0x1)
    544 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     543#if (DEBUG_FATFS_MOVE & 0x1)
     544if( DEBUG_FATFS_MOVE < cycle )
    545545print("\n[DBG] %s : access local FAT mapper\n"
    546546"fat_mapper_cxy = %x / fat_mapper_ptr = %x / first_cluster_id = %d / index = %d\n",
     
    555555            {
    556556
    557 #if (CONFIG_DEBUG_FATFS_MOVE & 0x1)
    558 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     557#if (DEBUG_FATFS_MOVE & 0x1)
     558if( DEBUG_FATFS_MOVE < cycle )
    559559printk("\n[DBG] %s : access remote FAT mapper\n"
    560560"fat_mapper_cxy = %x / fat_mapper_ptr = %x / first_cluster_id = %d / index = %d\n",
     
    572572        }
    573573
    574 #if (CONFIG_DEBUG_FATFS_MOVE & 0x1)
    575 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     574#if (DEBUG_FATFS_MOVE & 0x1)
     575if( DEBUG_FATFS_MOVE < cycle )
    576576printk("\n[DBG] %s : access device for inode %x / cluster_id %d\n",
    577577__FUNCTION__ , inode , searched_cluster_id );
     
    588588    }
    589589
    590 #if CONFIG_DEBUG_FATFS_MOVE
     590#if DEBUG_FATFS_MOVE
    591591cycle = (uint32_t)hal_get_cycles();
    592 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     592if( DEBUG_FATFS_MOVE < cycle )
    593593printk("\n[DBG] %s : thread %x exit / page %d / inode %x / mapper %x / cycle %d\n",
    594594__FUNCTION__ , CURRENT_THREAD , index , inode , mapper , cycle );
    595595#endif
    596596
    597 #if (CONFIG_DEBUG_FATFS_MOVE & 0x1)
    598 if( CONFIG_DEBUG_FATFS_MOVE < cycle )
     597#if (DEBUG_FATFS_MOVE & 0x1)
     598if( DEBUG_FATFS_MOVE < cycle )
    599599{
    600600    uint32_t * tab = (uint32_t *)buffer;
     
    623623    // - scan the directory entries in each 4 Kbytes page
    624624
    625 #if CONFIG_DEBUG_FATFS_LOAD
     625#if DEBUG_FATFS_LOAD
    626626uint32_t cycle = (uint32_t)hal_get_cycles();
    627 if( CONFIG_DEBUG_FATFS_LOAD < cycle )
     627if( DEBUG_FATFS_LOAD < cycle )
    628628printk("\n[DBG] %s : thread %x enter for child <%s> in parent inode %x / cycle %d\n",
    629629__FUNCTION__ , CURRENT_THREAD , name , parent_inode , cycle );
     
    665665        base = (uint8_t *)GET_PTR( base_xp );
    666666
    667 #if (CONFIG_DEBUG_FATFS_LOAD & 0x1)
    668 if( CONFIG_DEBUG_FATFS_LOAD < cycle )
     667#if (DEBUG_FATFS_LOAD & 0x1)
     668if( DEBUG_FATFS_LOAD < cycle )
    669669{
    670670    uint32_t * buf = (uint32_t *)base;
     
    749749    {
    750750
    751 #if CONFIG_DEBUG_FATFS_LOAD
     751#if DEBUG_FATFS_LOAD
    752752cycle = (uint32_t)hal_get_cycles();
    753 if( CONFIG_DEBUG_FATFS_LOAD < cycle )
     753if( DEBUG_FATFS_LOAD < cycle )
    754754printk("\n[DBG] %s : thread %x exit / child <%s> not found / cycle %d\n",
    755755__FUNCTION__ , CURRENT_THREAD, name, cycle );
     
    771771        hal_remote_sw( XPTR( child_cxy , &child_ptr->extend ) , cluster );
    772772
    773 #if CONFIG_DEBUG_FATFS_LOAD
     773#if DEBUG_FATFS_LOAD
    774774cycle = (uint32_t)hal_get_cycles();
    775 if( CONFIG_DEBUG_FATFS_LOAD < cycle )
     775if( DEBUG_FATFS_LOAD < cycle )
    776776printk("\n[DBG] %s : thread %x exit / child <%s> loaded / cycle %d\n",
    777777__FUNCTION__ , CURRENT_THREAD, name, cycle );
  • trunk/kernel/fs/vfs.c

    r437 r438  
    157157    error_t            error;
    158158
    159 #if CONFIG_DEBUG_VFS_INODE_CREATE
     159#if DEBUG_VFS_INODE_CREATE
    160160uint32_t cycle = (uint32_t)hal_get_cycles();
    161 if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
     161if( DEBUG_VFS_INODE_CREATE < cycle )
    162162printk("\n[DBG] %s : thread %x enter / dentry = %x in cluster %x / cycle %d\n",
    163163__FUNCTION__, CURRENT_THREAD, GET_PTR(dentry_xp), GET_CXY(dentry_xp), cycle );
     
    234234    remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );
    235235
    236 #if CONFIG_DEBUG_VFS_INODE_CREATE
     236#if DEBUG_VFS_INODE_CREATE
    237237cycle = (uint32_t)hal_get_cycles();
    238 if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
     238if( DEBUG_VFS_INODE_CREATE < cycle )
    239239printk("\n[DBG] %s : thread %x exit / inode = %x in cluster %x / cycle %d\n",
    240240__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
     
    272272{
    273273
    274 #if CONFIG_DEBUG_VFS_INODE_LOAD
     274#if DEBUG_VFS_INODE_LOAD
    275275uint32_t cycle = (uint32_t)hal_get_cycles();
    276 if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
     276if( DEBUG_VFS_INODE_LOAD < cycle )
    277277printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
    278278__FUNCTION__, CURRENT_THREAD , name , cycle );
     
    306306    }
    307307
    308 #if CONFIG_DEBUG_VFS_INODE_LOAD
     308#if DEBUG_VFS_INODE_LOAD
    309309cycle = (uint32_t)hal_get_cycles();
    310 if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
     310if( DEBUG_VFS_INODE_LOAD < cycle )
    311311printk("\n[DBG] %s : thread %x exit for <%s> / cycle %d\n",
    312312__FUNCTION__, CURRENT_THREAD , name , cycle );
     
    433433        kmem_req_t       req;        // request to kernel memory allocator
    434434
    435 #if CONFIG_DEBUG_VFS_DENTRY_CREATE
     435#if DEBUG_VFS_DENTRY_CREATE
    436436uint32_t cycle = (uint32_t)hal_get_cycles();
    437 if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     437if( DEBUG_VFS_DENTRY_CREATE < cycle )
    438438printk("\n[DBG] %s : thread %x enter for <%s> / parent_inode %x / cycle %d\n",
    439439__FUNCTION__, CURRENT_THREAD , name , parent , cycle );
     
    456456    {
    457457
    458 #if CONFIG_DEBUG_SYSCALLS_ERROR
     458#if DEBUG_SYSCALLS_ERROR
    459459printk("\n[ERROR] in %s : name <name> too long\n", __FUNCTION__ , name );
    460460#endif
     
    471471    {
    472472
    473 #if CONFIG_DEBUG_SYSCALLS_ERROR
     473#if DEBUG_SYSCALLS_ERROR
    474474printk("\n[ERROR] in %s : cannot allocate dentry\n", __FUNCTION__ );
    475475#endif
     
    484484    strcpy( dentry->name , name );
    485485
    486 #if( CONFIG_DEBUG_VFS_DENTRY_CREATE & 1 )
     486#if( DEBUG_VFS_DENTRY_CREATE & 1 )
    487487cycle = (uint32_t)hal_get_cycles();
    488 if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     488if( DEBUG_VFS_DENTRY_CREATE < cycle )
    489489printk("\n[DBG] %s : dentry initialised\n", __FUNCTION__ );
    490490#endif
     
    495495                  XPTR( local_cxy , &dentry->list ) );
    496496
    497 #if( CONFIG_DEBUG_VFS_DENTRY_CREATE & 1 )
     497#if( DEBUG_VFS_DENTRY_CREATE & 1 )
    498498cycle = (uint32_t)hal_get_cycles();
    499 if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     499if( DEBUG_VFS_DENTRY_CREATE < cycle )
    500500printk("\n[DBG] %s : dentry registerd in htab\n", __FUNCTION__ );
    501501#endif
     
    504504    *dentry_xp = XPTR( local_cxy , dentry );
    505505
    506 #if CONFIG_DEBUG_VFS_DENTRY_CREATE
     506#if DEBUG_VFS_DENTRY_CREATE
    507507cycle = (uint32_t)hal_get_cycles();
    508 if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
     508if( DEBUG_VFS_DENTRY_CREATE < cycle )
    509509printk("\n[DBG] %s : thread %x exit for <%s> / dentry %x / cycle %d\n",
    510510__FUNCTION__, CURRENT_THREAD , name , dentry , cycle );
     
    627627    uint32_t      file_id;      // created file descriptor index in reference fd_array
    628628
    629 #if CONFIG_DEBUG_VFS_OPEN
     629#if DEBUG_VFS_OPEN
    630630uint32_t cycle = (uint32_t)hal_get_cycles();
    631 if( CONFIG_DEBUG_VFS_OPEN < cycle )
     631if( DEBUG_VFS_OPEN < cycle )
    632632printk("\n[DBG] %s :  thread %x enter for <%s> / cycle %d\n",
    633633__FUNCTION__, CURRENT_THREAD, path, cycle );
     
    674674    if( error ) return error;
    675675
    676 #if CONFIG_DEBUG_VFS_OPEN
     676#if DEBUG_VFS_OPEN
    677677cycle = (uint32_t)hal_get_cycles();
    678 if( CONFIG_DEBUG_VFS_OPEN < cycle )
     678if( DEBUG_VFS_OPEN < cycle )
    679679printk("\n[DBG] %s : thread %x exit for <%s> / file %x in cluster %x / cycle %d\n",
    680680__FUNCTION__, CURRENT_THREAD, path, GET_PTR(file_xp), GET_CXY(file_xp), cycle );
     
    13651365    process = this->process;
    13661366
    1367 #if CONFIG_DEBUG_VFS_LOOKUP
     1367#if DEBUG_VFS_LOOKUP
    13681368uint32_t cycle = (uint32_t)hal_get_cycles();
    1369 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1369if( DEBUG_VFS_LOOKUP < cycle )
    13701370printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
    13711371__FUNCTION__, CURRENT_THREAD, pathname, cycle );
     
    13931393        vfs_get_name_from_path( current , name , &next , &last );
    13941394
    1395 #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
    1396 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1395#if (DEBUG_VFS_LOOKUP & 1)
     1396if( DEBUG_VFS_LOOKUP < cycle )
    13971397printk("\n[DBG] %s : look for <%s> / last = %d\n", __FUNCTION__ , name , last );
    13981398#endif
     
    14141414        {
    14151415
    1416 #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
    1417 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1416#if (DEBUG_VFS_LOOKUP & 1)
     1417if( DEBUG_VFS_LOOKUP < cycle )
    14181418printk("\n[DBG] %s : miss <%s> => load it\n", __FUNCTION__ , name );
    14191419#endif
     
    15001500            vfs_inode_lock( parent_xp );
    15011501
    1502 #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
    1503 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1502#if (DEBUG_VFS_LOOKUP & 1)
     1503if( DEBUG_VFS_LOOKUP < cycle )
    15041504printk("\n[DBG] %s : created node <%s>\n", __FUNCTION__ , name );
    15051505#endif
     
    15071507        }
    15081508
    1509 #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
    1510 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1509#if (DEBUG_VFS_LOOKUP & 1)
     1510if( DEBUG_VFS_LOOKUP < cycle )
    15111511printk("\n[DBG] %s : found <%s> / inode %x in cluster %x\n",
    15121512__FUNCTION__ , name , GET_PTR(child_xp) , GET_CXY(child_xp) );
     
    15361536    vfs_inode_unlock( parent_xp );
    15371537
    1538 #if CONFIG_DEBUG_VFS_LOOKUP
     1538#if DEBUG_VFS_LOOKUP
    15391539cycle = (uint32_t)hal_get_cycles();
    1540 if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
     1540if( DEBUG_VFS_LOOKUP < cycle )
    15411541printk("\n[DBG] %s : thread %x exit for <%s> / inode %x in cluster %x / cycle %d\n",
    15421542__FUNCTION__, CURRENT_THREAD, pathname, GET_PTR(child_xp), GET_CXY(child_xp), cycle );
     
    16391639    parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );
    16401640
    1641 #if CONFIG_DEBUG_VFS_ADD_CHILD
     1641#if DEBUG_VFS_ADD_CHILD
    16421642uint32_t cycle = (uint32_t)hal_get_cycles();
    1643 if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1643if( DEBUG_VFS_ADD_CHILD < cycle )
    16441644printk("\n[DBG] %s : thread %x enter for <%s> / child_cxy = %x / parent_cxy = %x\n",
    16451645__FUNCTION__ , CURRENT_THREAD , name , child_cxy , parent_cxy );
     
    16541654                                   &dentry_xp );
    16551655
    1656 #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
    1657 if( (CONFIG_DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
     1656#if (DEBUG_VFS_ADD_CHILD & 1)
     1657if( (DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
    16581658printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, local_cxy );
    16591659#endif
     
    16691669                                      &error );
    16701670
    1671 #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
    1672 if( (CONFIG_DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
     1671#if (DEBUG_VFS_ADD_CHILD & 1)
     1672if( (DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
    16731673printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, parent_cxy );
    16741674#endif
     
    17011701                                  &inode_xp );
    17021702
    1703 #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
    1704 if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1703#if (DEBUG_VFS_ADD_CHILD & 1)
     1704if( DEBUG_VFS_ADD_CHILD < cycle )
    17051705printk("\n[DBG] %s : inode <%x> created in cluster %x\n",
    17061706__FUNCTION__ , GET_PTR(inode_xp) , local_cxy );
     
    17221722                                     &error );
    17231723
    1724 #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
    1725 if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1724#if (DEBUG_VFS_ADD_CHILD & 1)
     1725if( DEBUG_VFS_ADD_CHILD < cycle )
    17261726printk("\n[DBG] %s : inode <%s> created in cluster %x\n",
    17271727__FUNCTION__ , GET_PTR(inode_xp) , child_cxy );
     
    17461746    hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );
    17471747
    1748 #if CONFIG_DEBUG_VFS_ADD_CHILD
     1748#if DEBUG_VFS_ADD_CHILD
    17491749cycle = (uint32_t)hal_get_cycles();
    1750 if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
     1750if( DEBUG_VFS_ADD_CHILD < cycle )
    17511751printk("\n[DBG] %s : thread %x exit for <%s>\n",
    17521752__FUNCTION__ , CURRENT_THREAD , name );
     
    17751775    assert( (mapper != NULL) , __FUNCTION__ , "no mapper for page\n" );
    17761776
    1777 #if CONFIG_DEBUG_VFS_MAPPER_MOVE
     1777#if DEBUG_VFS_MAPPER_MOVE
    17781778uint32_t cycle = (uint32_t)hal_get_cycles();
    1779 if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1779if( DEBUG_VFS_MAPPER_MOVE < cycle )
    17801780printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / inode %x / cycle %d\n",
    17811781__FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
     
    18051805    }
    18061806
    1807 #if CONFIG_DEBUG_VFS_MAPPER_MOVE
     1807#if DEBUG_VFS_MAPPER_MOVE
    18081808cycle = (uint32_t)hal_get_cycles();
    1809 if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1809if( DEBUG_VFS_MAPPER_MOVE < cycle )
    18101810printk("\n[DBG] %s : thread %x exit for page %d / mapper %x / inode %x / cycle %d\n",
    18111811__FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
     
    18291829    assert( (mapper != NULL) , __FUNCTION__ , "mapper pointer is NULL\n" );
    18301830
    1831 #if CONFIG_DEBUG_VFS_MAPPER_LOAD
     1831#if DEBUG_VFS_MAPPER_LOAD
    18321832uint32_t cycle = (uint32_t)hal_get_cycles();
    1833 if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1833if( DEBUG_VFS_MAPPER_MOVE < cycle )
    18341834printk("\n[DBG] %s : thread %x enter for inode %x in cluster %x / cycle %d\n",
    18351835__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
     
    18501850    }
    18511851
    1852 #if CONFIG_DEBUG_VFS_MAPPER_LOAD
     1852#if DEBUG_VFS_MAPPER_LOAD
    18531853cycle = (uint32_t)hal_get_cycles();
    1854 if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
     1854if( DEBUG_VFS_MAPPER_MOVE < cycle )
    18551855printk("\n[DBG] %s : thread %x exit for inode %x in cluster %x / cycle %d\n",
    18561856__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
  • trunk/kernel/kern/chdev.c

    r437 r438  
    3939extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c
    4040
    41 #if (CONFIG_DEBUG_SYS_READ & 1)
     41#if (DEBUG_SYS_READ & 1)
    4242extern uint32_t enter_chdev_cmd_read;
    4343extern uint32_t exit_chdev_cmd_read;
     
    4646#endif
    4747
    48 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     48#if (DEBUG_SYS_WRITE & 1)
    4949extern uint32_t enter_chdev_cmd_write;
    5050extern uint32_t exit_chdev_cmd_write;
     
    130130    uint32_t   save_sr;       // for critical section
    131131
    132 #if (CONFIG_DEBUG_SYS_READ & 1)
     132#if (DEBUG_SYS_READ & 1)
    133133enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
    134134#endif
    135135
    136 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     136#if (DEBUG_SYS_WRITE & 1)
    137137enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
    138138#endif
     
    144144    chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
    145145
    146 #if (CONFIG_DEBUG_CHDEV_CMD_RX || CONFIG_DEBUG_CHDEV_CMD_TX)
     146#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
    147147bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
    148148#endif
    149149   
    150 #if CONFIG_DEBUG_CHDEV_CMD_RX
     150#if DEBUG_CHDEV_CMD_RX
    151151uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    152 if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
     152if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    153153printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
    154154__FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
    155155#endif
    156156
    157 #if CONFIG_DEBUG_CHDEV_CMD_TX
     157#if DEBUG_CHDEV_CMD_TX
    158158uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    159 if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
     159if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    160160printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
    161161__FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
     
    207207    hal_restore_irq( save_sr );
    208208
    209 #if CONFIG_DEBUG_CHDEV_CMD_RX
     209#if DEBUG_CHDEV_CMD_RX
    210210rx_cycle = (uint32_t)hal_get_cycles();
    211 if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
     211if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    212212printk("\n[DBG] %s : client_thread %x (%s) exit for RX / cycle %d\n",
    213213__FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
    214214#endif
    215215
    216 #if CONFIG_DEBUG_CHDEV_CMD_TX
     216#if DEBUG_CHDEV_CMD_TX
    217217tx_cycle = (uint32_t)hal_get_cycles();
    218 if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
     218if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    219219printk("\n[DBG] %s : client_thread %x (%s) exit for TX / cycle %d\n",
    220220__FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
    221221#endif
    222222
    223 #if (CONFIG_DEBUG_SYS_READ & 1)
     223#if (DEBUG_SYS_READ & 1)
    224224exit_chdev_cmd_read = (uint32_t)hal_get_cycles();
    225225#endif
    226226
    227 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     227#if (DEBUG_SYS_WRITE & 1)
    228228exit_chdev_cmd_write = (uint32_t)hal_get_cycles();
    229229#endif
     
    275275            client_ptr = (thread_t *)GET_PTR( client_xp );
    276276
    277 #if CONFIG_DEBUG_CHDEV_SERVER_RX
     277#if DEBUG_CHDEV_SERVER_RX
    278278uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    279 if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     279if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    280280printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
    281281__FUNCTION__ , server , client_ptr , rx_cycle );
    282282#endif
    283283
    284 #if CONFIG_DEBUG_CHDEV_SERVER_TX
     284#if DEBUG_CHDEV_SERVER_TX
    285285uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    286 if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     286if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    287287printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
    288288__FUNCTION__ , server , client_ptr , tx_cycle );
    289289#endif
    290290
    291 #if (CONFIG_DEBUG_SYS_READ & 1)
     291#if (DEBUG_SYS_READ & 1)
    292292enter_chdev_server_read = (uint32_t)hal_get_cycles();
    293293#endif
    294294
    295 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     295#if (DEBUG_SYS_WRITE & 1)
    296296enter_chdev_server_write = (uint32_t)hal_get_cycles();
    297297#endif
     
    308308            thread_unblock( client_xp , THREAD_BLOCKED_IO );
    309309
    310 #if CONFIG_DEBUG_CHDEV_SERVER_RX
     310#if DEBUG_CHDEV_SERVER_RX
    311311rx_cycle = (uint32_t)hal_get_cycles();
    312 if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
     312if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    313313printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
    314314__FUNCTION__ , server , client_ptr , rx_cycle );
    315315#endif
    316316
    317 #if CONFIG_DEBUG_CHDEV_SERVER_TX
     317#if DEBUG_CHDEV_SERVER_TX
    318318tx_cycle = (uint32_t)hal_get_cycles();
    319 if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
     319if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    320320printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
    321321__FUNCTION__ , server , client_ptr , tx_cycle );
    322322#endif
    323323
    324 #if (CONFIG_DEBUG_SYS_READ & 1)
     324#if (DEBUG_SYS_READ & 1)
    325325exit_chdev_server_read = (uint32_t)hal_get_cycles();
    326326#endif
    327327
    328 #if (CONFIG_DEBUG_SYS_WRITE & 1)
     328#if (DEBUG_SYS_WRITE & 1)
    329329exit_chdev_server_write = (uint32_t)hal_get_cycles();
    330330#endif
  • trunk/kernel/kern/cluster.c

    r437 r438  
    8989        spinlock_init( &cluster->kcm_lock );
    9090
    91 #if CONFIG_DEBUG_CLUSTER_INIT
     91#if DEBUG_CLUSTER_INIT
    9292uint32_t cycle = (uint32_t)hal_get_cycles();
    93 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     93if( DEBUG_CLUSTER_INIT < cycle )
    9494printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
    9595__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     
    9999    cluster->dqdt_root_level = dqdt_init( info->x_size,
    100100                                          info->y_size,
    101                                           info->y_width );
    102     cluster->threads_var = 0;
    103     cluster->pages_var   = 0;
     101                                          info->y_width ) - 1;
    104102
    105103    // initialises embedded PPM
     
    113111    }
    114112
    115 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
     113#if( DEBUG_CLUSTER_INIT & 1 )
    116114cycle = (uint32_t)hal_get_cycles();
    117 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     115if( DEBUG_CLUSTER_INIT < cycle )
    118116printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
    119117__FUNCTION__ , local_cxy , cycle );
     
    123121        khm_init( &cluster->khm );
    124122
    125 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
     123#if( DEBUG_CLUSTER_INIT & 1 )
    126124uint32_t cycle = (uint32_t)hal_get_cycles();
    127 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     125if( DEBUG_CLUSTER_INIT < cycle )
    128126printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
    129127__FUNCTION__ , local_cxy , hal_get_cycles() );
     
    133131        kcm_init( &cluster->kcm , KMEM_KCM );
    134132
    135 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
     133#if( DEBUG_CLUSTER_INIT & 1 )
    136134uint32_t cycle = (uint32_t)hal_get_cycles();
    137 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     135if( DEBUG_CLUSTER_INIT < cycle )
    138136printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
    139137__FUNCTION__ , local_cxy , hal_get_cycles() );
     
    148146        }
    149147
    150 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
     148#if( DEBUG_CLUSTER_INIT & 1 )
    151149cycle = (uint32_t)hal_get_cycles();
    152 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     150if( DEBUG_CLUSTER_INIT < cycle )
    153151printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
    154152__FUNCTION__ , local_cxy , cycle );
     
    159157    cluster->rpc_threads = 0;
    160158
    161 #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
     159#if( DEBUG_CLUSTER_INIT & 1 )
    162160cycle = (uint32_t)hal_get_cycles();
    163 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     161if( DEBUG_CLUSTER_INIT < cycle )
    164162printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
    165163__FUNCTION__ , local_cxy , hal_get_cycles() );
     
    188186    }
    189187
    190 #if CONFIG_DEBUG_CLUSTER_INIT
     188#if DEBUG_CLUSTER_INIT
    191189cycle = (uint32_t)hal_get_cycles();
    192 if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
     190if( DEBUG_CLUSTER_INIT < cycle )
    193191printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
    194192__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
     
    456454    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    457455
    458 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     456#if DEBUG_CLUSTER_PROCESS_COPIES
    459457uint32_t cycle = (uint32_t)hal_get_cycles();
    460 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     458if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    461459printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
    462460__FUNCTION__ , local_cxy , process , cycle );
     
    487485    remote_spinlock_unlock_busy( copies_lock , irq_state );
    488486
    489 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     487#if DEBUG_CLUSTER_PROCESS_COPIES
    490488cycle = (uint32_t)hal_get_cycles();
    491 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     489if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    492490printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
    493491__FUNCTION__ , local_cxy , process , cycle );
     
    502500    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    503501
    504 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     502#if DEBUG_CLUSTER_PROCESS_COPIES
    505503uint32_t cycle = (uint32_t)hal_get_cycles();
    506 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     504if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    507505printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
    508506__FUNCTION__ , local_cxy , process , cycle );
     
    530528    remote_spinlock_unlock_busy( copies_lock , irq_state );
    531529
    532 #if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
     530#if DEBUG_CLUSTER_PROCESS_COPIES
    533531cycle = (uint32_t)hal_get_cycles();
    534 if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
     532if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
    535533printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
    536534__FUNCTION__ , local_cxy , process , cycle );
  • trunk/kernel/kern/cluster.h

    r437 r438  
    132132
    133133    // DQDT
    134     int32_t           pages_var;         /*! pages number increment from last DQQT updt   */
    135     int32_t           threads_var;       /*! threads number increment from last DQDT updt */
    136 
    137134        dqdt_node_t       dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster */
    138135
  • trunk/kernel/kern/core.c

    r433 r438  
    8585        // handle scheduler
    8686        if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
    87 
    88         // update DQDT
    89         if( ((ticks % CONFIG_DQDT_TICKS_PER_QUANTUM) == 0) && (core->lid == 0) )
    90         dqdt_global_update();
    9187}
    9288
  • trunk/kernel/kern/do_syscall.c

    r437 r438  
    173173        int  error = 0;
    174174       
     175    assert( (this == CURRENT_THREAD), __FUNCTION__,
     176    "wrong <this> argument\n" );
     177
    175178    // update user time
    176179        thread_user_time_update( this );
     
    194197
    195198    // check kernel stack overflow
    196     assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
     199    assert( (CURRENT_THREAD->signature == THREAD_SIGNATURE), __FUNCTION__,
      200    "kernel stack overflow for thread %x in cluster %x\n", CURRENT_THREAD, local_cxy );
    197201
    198202    // update kernel time
  • trunk/kernel/kern/dqdt.c

    r437 r438  
    2828#include <hal_remote.h>
    2929#include <printk.h>
     30#include <chdev.h>
    3031#include <cluster.h>
    3132#include <bits.h>
     
    3334
    3435
    35 ///////////////////////////////////////////
    36 void dqdt_local_print( dqdt_node_t * node )
    37 {
    38         printk("DQDT node : level = %d / cluster = %x / threads = %x / pages = %x\n",
    39                node->level,
    40                local_cxy,
    41                node->threads,
    42            node->pages );
    43 }
    44 
    45 /////////////////////////////////////////
    46 void dqdt_global_print( xptr_t  node_xp )
     36///////////////////////////////////////////////////////////////////////////////////////////
     37//      Extern variables
     38///////////////////////////////////////////////////////////////////////////////////////////
     39
     40extern chdev_directory_t  chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
     41
     42
     43///////////////////////////////////////////////////////////////////////////////////////////
     44// This static recursive function traverse the DQDT quad-tree from root to bottom.
     45///////////////////////////////////////////////////////////////////////////////////////////
     46static void dqdt_recursive_print( xptr_t  node_xp )
    4747{
    4848        uint32_t i;
    49     dqdt_node_t local_node;
    50 
    51     // get root node local copy
    52     hal_remote_memcpy( XPTR( local_cxy , &local_node ), node_xp , sizeof(dqdt_node_t) );
    53 
    54     // display DQDT node content
    55     dqdt_local_print( &local_node );
     49    dqdt_node_t node;
     50
     51    // get node local copy
     52    hal_remote_memcpy( XPTR( local_cxy , &node ), node_xp , sizeof(dqdt_node_t) );
     53
     54    // display node content
     55        nolock_printk("- level %d in cluster %x (node %x) : threads = %x / pages = %x\n",
     56    node.level, GET_CXY( node_xp ), GET_PTR( node_xp ), node.threads, node.pages );
    5657
    5758    // recursive call on children if node is not terminal
    58     if ( local_node.level > 0 )
     59    if ( node.level > 0 )
    5960    {
    6061        for ( i = 0 ; i < 4 ; i++ )
    6162        {
    62             if ( local_node.children[i] != XPTR_NULL )
    63                 dqdt_global_print( local_node.children[i] );
     63            if ( node.children[i] != XPTR_NULL ) dqdt_recursive_print( node.children[i] );
    6464        }
    6565    }
     66}
     67
     68///////////////////
     69void dqdt_display()
     70{
     71    reg_t   save_sr;
     72
     73    // build extended pointer on DQDT root node
     74        cluster_t * cluster = LOCAL_CLUSTER;
     75    uint32_t    level   = cluster->dqdt_root_level;
     76    xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );
     77
     78    // get pointers on TXT0 chdev
     79    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     80    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     81    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     82
     83    // get extended pointer on remote TXT0 chdev lock
     84    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     85
     86    // get TXT0 lock in busy waiting mode
     87    remote_spinlock_lock_busy( lock_xp , &save_sr );
     88
     89    // print header
     90    nolock_printk("\n***** DQDT state\n\n");
     91
     92    // call recursive function
     93    dqdt_recursive_print( root_xp );
     94
     95    // release lock
     96    remote_spinlock_unlock_busy( lock_xp , save_sr );
    6697}
    6798
     
    161192} // end dqdt_init()
    162193
    163 
    164 ///////////////////////////////////////////////////////////////////////////
    165 // This recursive function is called by the dqdt_global_update() function.
     194///////////////////////////////////////////////////////////////////////////
     195// This recursive function is called by the dqdt_update_threads() function.
    166196// It traverses the quad tree from clusters to root.
    167197///////////////////////////////////////////////////////////////////////////
    168 static void dqdt_propagate( xptr_t  node,         // extended pointer on current node
    169                             int32_t threads_var,  // number of threads variation
    170                             int32_t pages_var )   // number of pages variation
     198// @ node       : extended pointer on current node
     199// @ increment  : number of threads variation
     200///////////////////////////////////////////////////////////////////////////
     201static void dqdt_propagate_threads( xptr_t  node,
     202                                    int32_t increment )
    171203{
    172204    // get current node cluster identifier and local pointer
    173     cxy_t         cxy = (cxy_t)GET_CXY( node );
    174     dqdt_node_t * ptr = (dqdt_node_t *)GET_PTR( node );
     205    cxy_t         cxy = GET_CXY( node );
     206    dqdt_node_t * ptr = GET_PTR( node );
    175207
    176208    // update current node threads number
    177     hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , threads_var );
    178 
    179     // update current node pages number
    180     hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , pages_var );
     209    hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment );
    181210
    182211    // get extended pointer on parent node
     
    184213
    185214    // propagate if required
    186     if ( parent != XPTR_NULL )
    187     {
    188         dqdt_propagate( parent, threads_var, pages_var );
    189     }
    190 }
    191 
    192 /////////////////////////
    193 void dqdt_global_update()
     215    if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
     216}
     217
     218///////////////////////////////////////////////////////////////////////////
     219// This recursive function is called by the dqdt_update_pages() function.
     220// It traverses the quad tree from clusters to root.
     221///////////////////////////////////////////////////////////////////////////
     222// @ node       : extended pointer on current node
     223// @ increment  : number of pages variation
     224///////////////////////////////////////////////////////////////////////////
     225static void dqdt_propagate_pages( xptr_t  node,
     226                                  int32_t increment )
     227{
     228    // get current node cluster identifier and local pointer
     229    cxy_t         cxy = GET_CXY( node );
     230    dqdt_node_t * ptr = GET_PTR( node );
     231
     232    // update current node pages number
     233    hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment );
     234
     235    // get extended pointer on parent node
     236    xptr_t parent = (xptr_t)hal_remote_lwd( XPTR( cxy , &ptr->parent ) );
     237
     238    // propagate if required
     239    if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent, increment );
     240}
     241
     242/////////////////////////////////////////////
     243void dqdt_update_threads( int32_t increment )
    194244{
    195245        cluster_t   * cluster = LOCAL_CLUSTER;
    196246    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
    197247
    198     // get variations
    199     int32_t      threads_var = cluster->threads_var;
    200     int32_t      pages_var   = cluster->pages_var;
    201 
    202     // propagate this variation to DQDT upper levels
    203     if( (threads_var || pages_var) && (node->parent != XPTR_NULL) )
    204     {
    205         dqdt_propagate( node->parent, threads_var, pages_var );
    206     }
    207 
    208     // update variations
    209     hal_atomic_add( &cluster->threads_var , -threads_var );
    210     hal_atomic_add( &cluster->pages_var   , -pages_var   );
    211 }
    212 
    213 ///////////////////////////////////////////////////
    214 void dqdt_local_update_threads( int32_t increment )
    215 {
    216         cluster_t * cluster = LOCAL_CLUSTER;
    217 
    218     // register change for future propagation in DQDT
    219     hal_atomic_add( &cluster->threads_var , increment );
    220 
    221248    // update DQDT node level 0
    222     hal_atomic_add( &cluster->dqdt_tbl[0].threads , increment );
    223 }
    224 
    225 /////////////////////////////////////////////////
    226 void dqdt_local_update_pages( int32_t increment )
    227 {
    228         cluster_t * cluster = LOCAL_CLUSTER;
    229 
    230     // register change for future propagation in DQDT
    231     hal_atomic_add( &cluster->pages_var , increment );
     249    hal_atomic_add( &node->threads , increment );
     250
     251    // propagate to DQDT upper levels
     252    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
     253}
     254
     255///////////////////////////////////////////
     256void dqdt_update_pages( int32_t increment )
     257{
     258        cluster_t   * cluster = LOCAL_CLUSTER;
     259    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
    232260
    233261    // update DQDT node level 0
    234     hal_atomic_add( &cluster->dqdt_tbl[0].pages , increment );
    235 }
     262    hal_atomic_add( &node->pages , increment );
     263
     264    // propagate to DQDT upper levels
     265    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
     266}
     267
    236268
    237269////////////////////////////////////////////////////////////////////////////////
     
    289321        cluster_t * cluster = LOCAL_CLUSTER;
    290322    uint32_t    level   = cluster->dqdt_root_level;
    291     xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
     323    xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );
    292324
    293325    // call recursive function
    294     return dqdt_select_cluster( root , false );
     326    return dqdt_select_cluster( root_xp , false );
    295327}
    296328
     
    301333        cluster_t * cluster = LOCAL_CLUSTER;
    302334    uint32_t    level   = cluster->dqdt_root_level;
    303     xptr_t      root    = XPTR( 0 , &cluster->dqdt_tbl[level] );
     335    xptr_t      root_xp = XPTR( 0 , &cluster->dqdt_tbl[level] );
    304336
    305337    // call recursive function
    306     return dqdt_select_cluster( root , true );
    307 }
    308 
     338    return dqdt_select_cluster( root_xp , true );
     339}
     340
  • trunk/kernel/kern/dqdt.h

    r437 r438  
    9393
    9494/****************************************************************************************
    95  * This recursive function traverses the DQDT quad-tree from bottom to root, to propagate
    96  * the change in the threads number and allocated pages number in a leaf cluster,
    97  * toward the upper levels of the DQDT quad-tree.
    98  * It should be called periodically by each instance of the kernel.
    99  ***************************************************************************************/
    100 void dqdt_global_update();
    101 
    102 /****************************************************************************************
    103  * This local function updates both the total number of threads,
    104  * in the level 0 DQDT node, and the variation of the number of threads
    105  * for future propagation to the DQDT upper levels.
     95 * This local function updates the total number of threads in level 0 DQDT node,
     96 * and propagates the variation to the DQDT upper levels.
    10697 * It should be called on each thread creation or destruction.
    10798 ****************************************************************************************
    10899 * @ increment : increment (can be positive or negative)
    109100 ***************************************************************************************/
    110 void dqdt_local_update_threads( int32_t  increment );
     101void dqdt_update_threads( int32_t  increment );
    111102
    112103/****************************************************************************************
    113  * This local function updates both the total number of allocated pages,
    114  * in the level 0 DQDT node, and the variation of the number of pages
    115  * for future propagation to the DQDT upper levels.
    116  * It should be called on each memory allocation or release.
     104 * This local function updates the total number of pages in level 0 DQDT node,
     105 * and propagates the variation to the DQDT upper levels.
     106 * It should be called on each physical memory page allocation or release.
    117107 ****************************************************************************************
    118108 * @ increment : increment (can be positive or negative)
    119109 ***************************************************************************************/
    120 void dqdt_local_update_pages( int32_t increment );
     110void dqdt_update_pages( int32_t increment );
    121111
    122112/****************************************************************************************
     
    139129
    140130/****************************************************************************************
    141  * This recursive function displays usage information for all DQDT nodes in the subtree
    142  * defined by the node argument. It traverses the quadtree from root to bottom.
    143  ****************************************************************************************
    144  * @ node_xp   : extended pointer on a DQDT node.
     131 * This function displays on kernel TXT0 the DQDT state for all nodes in the quad-tree.
     132 * It traverses the quadtree from root to bottom, and can be called by a thread
      133 * running in any cluster.
    145134 ***************************************************************************************/
    146 void dqdt_global_print( xptr_t  node_xp );
    147 
    148 /****************************************************************************************
    149  * This function displays summary usage information in a given DQDT local node.
    150  ****************************************************************************************
    151  * @ node   : local pointer on a DQDT node.
    152  ***************************************************************************************/
    153 void dqdt_local_print( dqdt_node_t * node );
     135void dqdt_display();
    154136
    155137
  • trunk/kernel/kern/kernel_init.c

    r437 r438  
    125125// these debug variables are used to analyse the sys_read() syscall timing
    126126
    127 #if CONFIG_DEBUG_SYS_READ
     127#if DEBUG_SYS_READ
    128128uint32_t   enter_sys_read;
    129129uint32_t   exit_sys_read;
     
    150150// these debug variables are used to analyse the sys_write() syscall timing
    151151
    152 #if CONFIG_DEBUG_SYS_WRITE   
     152#if DEBUG_SYS_WRITE   
    153153uint32_t   enter_sys_write;
    154154uint32_t   exit_sys_write;
     
    324324            }
    325325
    326 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    327 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
     326#if( DEBUG_KERNEL_INIT & 0x1 )
     327if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    328328printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n",
    329329__FUNCTION__ , local_cxy , chdev_ptr );
     
    353353                chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );
    354354
    355 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    356 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
     355#if( DEBUG_KERNEL_INIT & 0x1 )
     356if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    357357printk("\n[DBG] %s : created DMA[%d] in cluster %x / chdev = %x\n",
    358358__FUNCTION__ , channel , local_cxy , chdev_ptr );
     
    488488                    }
    489489
    490 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    491 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
     490#if( DEBUG_KERNEL_INIT & 0x1 )
     491if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    492492printk("\n[DBG] %s : create chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n",
    493493__FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev );
     
    623623    }
    624624
    625 #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
    626 if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
     625#if( DEBUG_KERNEL_INIT & 0x1 )
     626if( hal_time_stamp() > DEBUG_KERNEL_INIT )
    627627{
    628628    printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
     
    807807    /////////////////////////////////////////////////////////////////////////////////
    808808
    809 #if CONFIG_DEBUG_KERNEL_INIT
    810 if( (core_lid ==  0) && (local_cxy == 0) )
     809#if DEBUG_KERNEL_INIT
     810if( (core_lid ==  0) & (local_cxy == 0) )
    811811printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
    812812__FUNCTION__, (uint32_t)hal_get_cycles() );
     
    845845    /////////////////////////////////////////////////////////////////////////////////
    846846
    847 #if CONFIG_DEBUG_KERNEL_INIT
    848 if( (core_lid ==  0) && (local_cxy == 0) )
     847#if DEBUG_KERNEL_INIT
     848if( (core_lid ==  0) & (local_cxy == 0) )
    849849printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
    850850__FUNCTION__, (uint32_t)hal_get_cycles() );
     
    872872    ////////////////////////////////////////////////////////////////////////////////
    873873
    874 #if CONFIG_DEBUG_KERNEL_INIT
    875 if( (core_lid ==  0) && (local_cxy == 0) )
     874#if DEBUG_KERNEL_INIT
     875if( (core_lid ==  0) & (local_cxy == 0) )
    876876printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
    877877__FUNCTION__, (uint32_t)hal_get_cycles() );
     
    905905    /////////////////////////////////////////////////////////////////////////////////
    906906
    907 #if CONFIG_DEBUG_KERNEL_INIT
    908 if( (core_lid ==  0) && (local_cxy == 0) )
     907#if DEBUG_KERNEL_INIT
     908if( (core_lid ==  0) & (local_cxy == 0) )
    909909printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
    910910__FUNCTION__, (uint32_t)hal_get_cycles() );
    911911#endif
    912912
    913 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
     913#if( DEBUG_KERNEL_INIT & 1 )
    914914chdev_dir_display();
    915915#endif
     
    927927
    928928    // all cores initialize the idle thread descriptor
    929     error = thread_kernel_init( thread,
    930                                 THREAD_IDLE,
    931                                 &thread_idle_func,
    932                                 NULL,
    933                                 core_lid );
     929    error = thread_idle_init( thread,
     930                              THREAD_IDLE,
     931                              &thread_idle_func,
     932                              NULL,
     933                              core_lid );
    934934    if( error )
    935935    {
     
    942942    core->scheduler.idle = thread;
    943943
    944 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
     944#if( DEBUG_KERNEL_INIT & 1 )
    945945sched_display( core_lid );
    946946#endif
     
    10141014    /////////////////////////////////////////////////////////////////////////////////
    10151015
    1016 #if CONFIG_DEBUG_KERNEL_INIT
    1017 if( (core_lid ==  0) && (local_cxy == 0) )
     1016#if DEBUG_KERNEL_INIT
     1017if( (core_lid ==  0) & (local_cxy == 0) )
    10181018printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
    10191019__FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
     
    10751075    /////////////////////////////////////////////////////////////////////////////////
    10761076
    1077 #if CONFIG_DEBUG_KERNEL_INIT
    1078 if( (core_lid ==  0) && (local_cxy == io_cxy) )
     1077#if DEBUG_KERNEL_INIT
     1078if( (core_lid ==  0) & (local_cxy == 0) )
    10791079printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster %x / cycle %d\n",
    10801080__FUNCTION__, vfs_root_inode_xp , io_cxy , (uint32_t)hal_get_cycles());
     
    11101110    /////////////////////////////////////////////////////////////////////////////////
    11111111
    1112 #if CONFIG_DEBUG_KERNEL_INIT
    1113 if( (core_lid ==  0) && (local_cxy == io_cxy) )
     1112#if DEBUG_KERNEL_INIT
     1113if( (core_lid ==  0) & (local_cxy == 0) )
    11141114printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster %x / cycle %d\n",
    11151115__FUNCTION__, devfs_dev_inode_xp , io_cxy , (uint32_t)hal_get_cycles() );
     
    11491149    /////////////////////////////////////////////////////////////////////////////////
    11501150
    1151 #if CONFIG_DEBUG_KERNEL_INIT
    1152 if( (core_lid ==  0) && (local_cxy == 0) )
     1151#if DEBUG_KERNEL_INIT
     1152if( (core_lid ==  0) & (local_cxy == 0) )
    11531153printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
    11541154__FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
     
    11621162    {
    11631163
    1164 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
     1164#if( DEBUG_KERNEL_INIT & 1 )
    11651165vfs_display( vfs_root_inode_xp );
    11661166#endif
     
    11751175    /////////////////////////////////////////////////////////////////////////////////
    11761176
    1177 #if CONFIG_DEBUG_KERNEL_INIT
    1178 if( (core_lid ==  0) && (local_cxy == 0) )
     1177#if DEBUG_KERNEL_INIT
     1178if( (core_lid ==  0) & (local_cxy == 0) )
    11791179printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
    11801180__FUNCTION__ , (uint32_t)hal_get_cycles() );
     
    11891189        print_banner( (info->x_size * info->y_size) , info->cores_nr );
    11901190
    1191 #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
     1191#if( DEBUG_KERNEL_INIT & 1 )
     11921192printk("\n\n***** memory footprint for main kernel objects\n\n"
    11931193                   " - thread descriptor  : %d bytes\n"
  • trunk/kernel/kern/process.c

    r437 r438  
    124124    model_pid  = hal_remote_lw( XPTR( model_cxy  , &model_ptr->pid ) );
    125125
    126 #if CONFIG_DEBUG_PROCESS_REFERENCE_INIT
     126#if DEBUG_PROCESS_REFERENCE_INIT
    127127uint32_t cycle = (uint32_t)hal_get_cycles();
    128 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     128if( DEBUG_PROCESS_REFERENCE_INIT )
    129129printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n",
    130130__FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle );
     
    141141    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );
    142142 
    143 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    144 cycle = (uint32_t)hal_get_cycles();
    145 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     143#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     144cycle = (uint32_t)hal_get_cycles();
     145if( DEBUG_PROCESS_REFERENCE_INIT )
    146146printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n",
    147147__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    232232    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
    233233
    234 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    235 cycle = (uint32_t)hal_get_cycles();
    236 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     234#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     235cycle = (uint32_t)hal_get_cycles();
     236if( DEBUG_PROCESS_REFERENCE_INIT )
    237237printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n",
    238238__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    272272        hal_fence();
    273273
    274 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1)
    275 cycle = (uint32_t)hal_get_cycles();
    276 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT )
     274#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     275cycle = (uint32_t)hal_get_cycles();
     276if( DEBUG_PROCESS_REFERENCE_INIT )
    277277printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
    278278__FUNCTION__ , CURRENT_THREAD , pid , cycle );
     
    297297    local_process->term_state = 0;
    298298
    299 #if CONFIG_DEBUG_PROCESS_COPY_INIT
     299#if DEBUG_PROCESS_COPY_INIT
    300300uint32_t cycle = (uint32_t)hal_get_cycles();
    301 if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     301if( DEBUG_PROCESS_COPY_INIT )
    302302printk("\n[DBG] %s : thread %x enter for process %x\n",
    303303__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     
    347347        hal_fence();
    348348
    349 #if CONFIG_DEBUG_PROCESS_COPY_INIT
    350 cycle = (uint32_t)hal_get_cycles();
    351 if( CONFIG_DEBUG_PROCESS_COPY_INIT )
     349#if DEBUG_PROCESS_COPY_INIT
     350cycle = (uint32_t)hal_get_cycles();
     351if( DEBUG_PROCESS_COPY_INIT )
    352352printk("\n[DBG] %s : thread %x exit for process %x\n",
    353353__FUNCTION__ , CURRENT_THREAD , local_process->pid );
     
    371371    "process %x in cluster %x has still active threads", pid , local_cxy );
    372372
    373 #if CONFIG_DEBUG_PROCESS_DESTROY
     373#if DEBUG_PROCESS_DESTROY
    374374uint32_t cycle = (uint32_t)hal_get_cycles();
    375 if( CONFIG_DEBUG_PROCESS_DESTROY )
     375if( DEBUG_PROCESS_DESTROY )
    376376printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
    377377__FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
    378 #endif
    379 
    380 #if CONFIG_DEBUG_PROCESS_DESTROY
    381 if( CONFIG_DEBUG_PROCESS_DESTROY  & 1 )
    382 cluster_processes_display( CXY_FROM_PID( pid ) );
    383378#endif
    384379
     
    422417    process_free( process );
    423418
    424 #if CONFIG_DEBUG_PROCESS_DESTROY
    425 cycle = (uint32_t)hal_get_cycles();
    426 if( CONFIG_DEBUG_PROCESS_DESTROY )
     419#if DEBUG_PROCESS_DESTROY
     420cycle = (uint32_t)hal_get_cycles();
     421if( DEBUG_PROCESS_DESTROY )
    427422printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n",
    428423__FUNCTION__ , CURRENT_THREAD , process, pid, cycle );
     
    457452    thread_t * client = CURRENT_THREAD;
    458453
    459 #if CONFIG_DEBUG_PROCESS_SIGACTION
     454#if DEBUG_PROCESS_SIGACTION
    460455uint32_t cycle = (uint32_t)hal_get_cycles();
    461 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     456if( DEBUG_PROCESS_SIGACTION < cycle )
    462457printk("\n[DBG] %s : thread %x enter to %s process %x / cycle %d\n",
    463458__FUNCTION__ , client, process_action_str( action_type ) , pid , cycle );
     
    483478    // it can be shared because all parallel, non-blocking, server threads
    484479    // use the same input arguments, and use the shared RPC response field
    485     // but use
    486480
    487481    // the client thread makes the following sequence:
     
    502496
    503497    // initialize shared RPC descriptor
    504     rpc.response = 0;
    505     rpc.blocking = false;
    506     rpc.index    = RPC_PROCESS_SIGACTION;
    507     rpc.thread   = client;
    508     rpc.lid      = client->core->lid;
    509     rpc.args[0]  = action_type;
    510     rpc.args[1]  = pid;
     498    rpc.responses = 0;
     499    rpc.blocking  = false;
     500    rpc.index     = RPC_PROCESS_SIGACTION;
     501    rpc.thread    = client;
     502    rpc.lid       = client->core->lid;
     503    rpc.args[0]   = action_type;
     504    rpc.args[1]   = pid;
    511505
     512506    // send RPCs to all clusters containing process copies
     
    514508    {
    515509
    516 #if CONFIG_DEBUG_PROCESS_SIGACTION
    517 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     510#if DEBUG_PROCESS_SIGACTION
     511if( DEBUG_PROCESS_SIGACTION < cycle )
    518512printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
    519513__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
    520514#endif
    521515        // atomically increment responses counter
    522         hal_atomic_add( (void *)&rpc.response , 1 );
     516        hal_atomic_add( (void *)&rpc.responses , 1 );
    523517
    524518        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     
    538532    sched_yield("blocked on rpc_process_sigaction");
    539533
    540 #if CONFIG_DEBUG_PROCESS_SIGACTION
    541 cycle = (uint32_t)hal_get_cycles();
    542 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     534#if DEBUG_PROCESS_SIGACTION
     535cycle = (uint32_t)hal_get_cycles();
     536if( DEBUG_PROCESS_SIGACTION < cycle )
    543537printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
    544538__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
     
    563557    owner_cxy = CXY_FROM_PID( process->pid );
    564558
    565 #if CONFIG_DEBUG_PROCESS_SIGACTION
     559#if DEBUG_PROCESS_SIGACTION
    566560uint32_t cycle = (uint32_t)hal_get_cycles();
    567 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     561if( DEBUG_PROCESS_SIGACTION < cycle )
    568562printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    569563__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    623617    }
    624618
    625 #if CONFIG_DEBUG_PROCESS_SIGACTION
    626 cycle = (uint32_t)hal_get_cycles();
    627 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     619#if DEBUG_PROCESS_SIGACTION
     620cycle = (uint32_t)hal_get_cycles();
     621if( DEBUG_PROCESS_SIGACTION < cycle )
    628622printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    629623__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    643637    this = CURRENT_THREAD;
    644638
    645 #if CONFIG_DEBUG_PROCESS_SIGACTION
     639#if DEBUG_PROCESS_SIGACTION
    646640uint32_t cycle = (uint32_t)hal_get_cycles();
    647 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     641if( DEBUG_PROCESS_SIGACTION < cycle )
    648642printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    649643__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    671665    spinlock_unlock( &process->th_lock );
    672666
    673 #if CONFIG_DEBUG_PROCESS_SIGACTION
    674 cycle = (uint32_t)hal_get_cycles();
    675 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     667#if DEBUG_PROCESS_SIGACTION
     668cycle = (uint32_t)hal_get_cycles();
     669if( DEBUG_PROCESS_SIGACTION < cycle )
    676670printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    677671__FUNCTION__ , this , process->pid , local_cxy , cycle );
     
    687681    uint32_t            count;         // threads counter
    688682
    689 #if CONFIG_DEBUG_PROCESS_SIGACTION
     683#if DEBUG_PROCESS_SIGACTION
    690684uint32_t cycle = (uint32_t)hal_get_cycles();
    691 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     685if( DEBUG_PROCESS_SIGACTION < cycle )
    692686printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
    693687__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     
    716710    spinlock_unlock( &process->th_lock );
    717711
    718 #if CONFIG_DEBUG_PROCESS_SIGACTION
    719 cycle = (uint32_t)hal_get_cycles();
    720 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     712#if DEBUG_PROCESS_SIGACTION
     713cycle = (uint32_t)hal_get_cycles();
     714if( DEBUG_PROCESS_SIGACTION < cycle )
    721715printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
    722716__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
     
    10361030    vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
    10371031
    1038     // check parent process is the reference
     1032    // check parent process is the reference process
    10391033    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
     1034
     1035printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",
     1036__FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );
     1037
    10401038    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
    10411039    "parent process must be the reference process\n" );
    10421040
    1043 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
     1041#if DEBUG_PROCESS_MAKE_FORK
    10441042uint32_t cycle = (uint32_t)hal_get_cycles();
    1045 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
    1046 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    1047 __FUNCTION__, CURRENT_THREAD, parent_pid, cycle );
     1043if( DEBUG_PROCESS_MAKE_FORK < cycle )
     1044printk("\n[DBG] %s : thread %x enter for process %x / cluster %x / cycle %d\n",
     1045__FUNCTION__, CURRENT_THREAD, parent_pid, local_cxy, cycle );
    10481046#endif
    10491047
     
    10731071                            parent_process_xp );
    10741072
    1075 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1076 cycle = (uint32_t)hal_get_cycles();
    1077 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1073#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1074cycle = (uint32_t)hal_get_cycles();
     1075if( DEBUG_PROCESS_MAKE_FORK < cycle )
    10781076printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n",
    10791077__FUNCTION__, CURRENT_THREAD, process, new_pid, cycle );
     
    10921090    }
    10931091
    1094 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1095 cycle = (uint32_t)hal_get_cycles();
    1096 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1092#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1093cycle = (uint32_t)hal_get_cycles();
     1094if( DEBUG_PROCESS_MAKE_FORK < cycle )
    10971095printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n",
    10981096__FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle );
     
    11151113    }
    11161114
    1117     // check main thread index
    1118     assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    1119 
    1120 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1121 cycle = (uint32_t)hal_get_cycles();
    1122 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1115    // check main thread LTID
     1116    assert( (LTID_FROM_TRDID(thread->trdid) == 0) , __FUNCTION__ ,
     1117    "main thread must have LTID == 0\n" );
     1118
     1119#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1120cycle = (uint32_t)hal_get_cycles();
     1121if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11231122printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n",
    11241123__FUNCTION__ , CURRENT_THREAD, thread, cycle );
     
    11401139    vmm_set_cow( process );
    11411140 
    1142 #if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
    1143 cycle = (uint32_t)hal_get_cycles();
    1144 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1141#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     1142cycle = (uint32_t)hal_get_cycles();
     1143if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11451144printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n",
    11461145__FUNCTION__ , CURRENT_THREAD, cycle );
     
    11621161    *child_pid    = new_pid;
    11631162
    1164 #if CONFIG_DEBUG_PROCESS_MAKE_FORK
    1165 cycle = (uint32_t)hal_get_cycles();
    1166 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )
     1163#if DEBUG_PROCESS_MAKE_FORK
     1164cycle = (uint32_t)hal_get_cycles();
     1165if( DEBUG_PROCESS_MAKE_FORK < cycle )
    11671166printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    11681167__FUNCTION__, CURRENT_THREAD, cycle );
     
    12051204    "must be called by the main thread\n" );
    12061205 
    1207 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
     1206#if DEBUG_PROCESS_MAKE_EXEC
    12081207uint32_t cycle = (uint32_t)hal_get_cycles();
    1209 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1208if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12101209printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n",
    12111210__FUNCTION__, old_thread, pid, path, cycle );
     
    12441243    process_txt_set_ownership( XPTR( local_cxy , new_process) );
    12451244
    1246 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1247 cycle = (uint32_t)hal_get_cycles();
    1248 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1245#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1246cycle = (uint32_t)hal_get_cycles();
     1247if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12491248printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n",
    12501249__FUNCTION__ , old_thread , new_process , cycle );
     
    12611260        }
    12621261
    1263 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1264 cycle = (uint32_t)hal_get_cycles();
    1265 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1262#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1263cycle = (uint32_t)hal_get_cycles();
     1264if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12661265printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n",
    12671266__FUNCTION__, old_thread , new_process->pid , cycle );
     
    12901289        }
    12911290
    1292     // check main thread index
    1293     assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
    1294 
    1295 #if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
    1296 cycle = (uint32_t)hal_get_cycles();
    1297 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1291    // check main thread LTID
     1292    assert( (LTID_FROM_TRDID(new_thread->trdid) == 0) , __FUNCTION__ ,
     1293    "main thread must have LTID == 0\n" );
     1294
     1295#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
     1296cycle = (uint32_t)hal_get_cycles();
     1297if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    12981298printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n",
    12991299__FUNCTION__ , old_thread , new_thread , cycle );
     
    13271327    hal_fence();
    13281328
    1329 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC
    1330 cycle = (uint32_t)hal_get_cycles();
    1331 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )
     1329#if DEBUG_PROCESS_MAKE_EXEC
     1330cycle = (uint32_t)hal_get_cycles();
     1331if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    13321332printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n",
    13331333__FUNCTION__ , old_thread , new_thread , cycle );
     
    13421342{
    13431343
    1344 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE
     1344#if DEBUG_PROCESS_ZERO_CREATE
    13451345uint32_t cycle = (uint32_t)hal_get_cycles();
    1346 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1346if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    13471347printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13481348#endif
     
    13701370        hal_fence();
    13711371
    1372 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE
    1373 cycle = (uint32_t)hal_get_cycles();
    1374 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle )
     1372#if DEBUG_PROCESS_ZERO_CREATE
     1373cycle = (uint32_t)hal_get_cycles();
     1374if( DEBUG_PROCESS_ZERO_CREATE < cycle )
    13751375printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13761376#endif
     
    13881388    error_t          error;
    13891389
    1390 #if CONFIG_DEBUG_PROCESS_INIT_CREATE
     1390#if DEBUG_PROCESS_INIT_CREATE
    13911391uint32_t cycle = (uint32_t)hal_get_cycles();
    1392 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1392if( DEBUG_PROCESS_INIT_CREATE < cycle )
    13931393printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    13941394#endif
     
    14681468    hal_fence();
    14691469
    1470 #if CONFIG_DEBUG_PROCESS_INIT_CREATE
    1471 cycle = (uint32_t)hal_get_cycles();
    1472 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle )
     1470#if DEBUG_PROCESS_INIT_CREATE
     1471cycle = (uint32_t)hal_get_cycles();
     1472if( DEBUG_PROCESS_INIT_CREATE < cycle )
    14731473printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle );
    14741474#endif
     
    16051605    xptr_t      lock_xp;      // extended pointer on list lock in chdev
    16061606
    1607 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1607#if DEBUG_PROCESS_TXT_ATTACH
    16081608uint32_t cycle = (uint32_t)hal_get_cycles();
    1609 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1609if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16101610printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d  / cycle %d\n",
    16111611__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
     
    16341634    remote_spinlock_unlock( lock_xp );
    16351635
    1636 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1637 cycle = (uint32_t)hal_get_cycles();
    1638 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1636#if DEBUG_PROCESS_TXT_ATTACH
     1637cycle = (uint32_t)hal_get_cycles();
     1638if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16391639printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
    16401640__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
     
    16641664    "process descriptor not in owner cluster" );
    16651665
    1666 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1666#if DEBUG_PROCESS_TXT_ATTACH
    16671667uint32_t cycle = (uint32_t)hal_get_cycles();
    1668 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1668if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16691669printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    16701670__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    16901690    remote_spinlock_unlock( lock_xp );
    16911691
    1692 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1693 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1692#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1693if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    16941694{
    16951695    xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
     
    17061706#endif
    17071707
    1708 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1709 cycle = (uint32_t)hal_get_cycles();
    1710 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1708#if DEBUG_PROCESS_TXT_ATTACH
     1709cycle = (uint32_t)hal_get_cycles();
     1710if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17111711printk("\n[DBG] %s : thread %x exit / process %x detached from TXT / cycle %d\n",
    17121712__FUNCTION__, CURRENT_THREAD, process->pid, cycle );
     
    17371737    "process descriptor not in owner cluster\n" );
    17381738
    1739 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1739#if DEBUG_PROCESS_TXT_ATTACH
    17401740uint32_t cycle = (uint32_t)hal_get_cycles();
    1741 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1741if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17421742printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    17431743__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    17551755    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
    17561756
    1757 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1758 cycle = (uint32_t)hal_get_cycles();
    1759 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1757#if DEBUG_PROCESS_TXT_ATTACH
     1758cycle = (uint32_t)hal_get_cycles();
     1759if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17601760printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
    17611761__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    17941794    "process descriptor not in owner cluster\n" );
    17951795
    1796 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
     1796#if DEBUG_PROCESS_TXT_ATTACH
    17971797uint32_t cycle = (uint32_t)hal_get_cycles();
    1798 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1798if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    17991799printk("\n[DBG] %s : thread %x enter / process %x / pid %x / cycle %d\n",
    18001800__FUNCTION__, CURRENT_THREAD, process_ptr, process_pid, cycle );
     
    18131813    txt_id   = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
    18141814
    1815 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1816 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1815#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1816if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18171817printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
    18181818__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
     
    18321832        {
    18331833
    1834 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1835 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1834#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1835if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18361836printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
    18371837#endif
     
    18511851                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    18521852
    1853 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1854 cycle = (uint32_t)hal_get_cycles();
    1855 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1853#if DEBUG_PROCESS_TXT_ATTACH
     1854cycle = (uint32_t)hal_get_cycles();
     1855if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18561856printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
    18571857__FUNCTION__, CURRENT_THREAD, process_pid,
     
    18731873        {
    18741874
    1875 #if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
    1876 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1875#if( DEBUG_PROCESS_TXT_ATTACH & 1 )
     1876if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18771877printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
    18781878#endif
     
    18931893                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    18941894
    1895 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1896 cycle = (uint32_t)hal_get_cycles();
    1897 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1895#if DEBUG_PROCESS_TXT_ATTACH
     1896cycle = (uint32_t)hal_get_cycles();
     1897if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    18981898printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
    18991899__FUNCTION__, CURRENT_THREAD, process_pid,
     
    19101910            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
    19111911
    1912 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1913 cycle = (uint32_t)hal_get_cycles();
    1914 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1912#if DEBUG_PROCESS_TXT_ATTACH
     1913cycle = (uint32_t)hal_get_cycles();
     1914if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    19151915printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
    19161916__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
     
    19221922    {
    19231923
    1924 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
    1925 cycle = (uint32_t)hal_get_cycles();
    1926 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
     1924#if DEBUG_PROCESS_TXT_ATTACH
     1925cycle = (uint32_t)hal_get_cycles();
     1926if( DEBUG_PROCESS_TXT_ATTACH < cycle )
    19271927printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
    19281928__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
  • trunk/kernel/kern/rpc.c

    r437 r438  
    4343
    4444/////////////////////////////////////////////////////////////////////////////////////////
    45 //        Debug macros for marshalling functions
    46 /////////////////////////////////////////////////////////////////////////////////////////
    47 
    48 #if CONFIG_DEBUG_RPC_MARSHALING
    49 
    50 #define RPC_DEBUG_ENTER                                                                \
    51 uint32_t cycle = (uint32_t)hal_get_cycles();                                           \
    52 if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
    53 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n",                     \
    54 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
    55 
    56 #define RPC_DEBUG_EXIT                                                                 \
    57 cycle = (uint32_t)hal_get_cycles();                                                    \
    58 if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                                              \
    59 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n",                      \
    60 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
    61 
    62 #else
    63 
    64 #define RPC_DEBUG_ENTER
    65 
    66 #define RPC_DEBUG_EXIT
    67 
    68 #endif
    69 
    70 /////////////////////////////////////////////////////////////////////////////////////////
    7145//      array of function pointers  (must be consistent with enum in rpc.h)
    7246/////////////////////////////////////////////////////////////////////////////////////////
     
    12296               rpc_desc_t * rpc )
    12397{
    124     volatile error_t   full = 0;
    125     thread_t         * this = CURRENT_THREAD;
    126     core_t           * core = this->core;
    127 
    128 #if CONFIG_DEBUG_RPC_SEND
     98    lid_t              server_core_lid;
     99    lid_t              client_core_lid;
     100    volatile error_t   full;
     101    thread_t         * this;
     102    cluster_t        * cluster;
     103
     104#if DEBUG_RPC_CLIENT_GENERIC
    129105uint32_t cycle = (uint32_t)hal_get_cycles();
    130 if( CONFIG_DEBUG_RPC_SEND < cycle )
     106if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    131107printk("\n[DBG] %s : thread %x in cluster %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
    132108__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, rpc, cycle );
    133109#endif
    134110
    135     // register client thread pointer and core lid in RPC descriptor
     111    full            = 0;
     112    this            = CURRENT_THREAD;
     113    cluster         = LOCAL_CLUSTER;
     114    client_core_lid = this->core->lid;
     115
     116    // select a server_core index:
     117    // use client core index if possible / core 0 otherwise
     118    if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
     119    {
     120        server_core_lid = client_core_lid;
     121    }
     122    else
     123    {   
     124        server_core_lid = 0;
     125    }
     126
     127    // register client_thread pointer and client_core lid in RPC descriptor
    136128    rpc->thread = this;
    137     rpc->lid    = core->lid;
    138 
    139     // build an extended pointer on the RPC descriptor
     129    rpc->lid    = client_core_lid;
     130
     131    // build extended pointer on the RPC descriptor
    140132        xptr_t   desc_xp = XPTR( local_cxy , rpc );
    141133
     
    160152    hal_fence();
    161153       
    162     // send IPI to the remote core corresponding to the client core
    163         dev_pic_send_ipi( server_cxy , core->lid );
     154    // send IPI to the selected server core
     155        dev_pic_send_ipi( server_cxy , server_core_lid );
    164156
    165157    // wait RPC completion before returning if blocking RPC
     
    171163        {
    172164
    173 #if CONFIG_DEBUG_RPC_SEND
    174 cycle = (uint32_t)hal_get_cycles();
    175 if( CONFIG_DEBUG_RPC_SEND < cycle )
     165#if DEBUG_RPC_CLIENT_GENERIC
     166cycle = (uint32_t)hal_get_cycles();
     167if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    176168printk("\n[DBG] %s : thread %x in cluster %x busy waiting / rpc[%d] / cycle %d\n",
    177169__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
    178170#endif
    179171
    180             while( rpc->response ) hal_fixed_delay( 100 );
     172            while( rpc->responses ) hal_fixed_delay( 100 );
    181173   
    182 #if CONFIG_DEBUG_RPC_SEND
    183 cycle = (uint32_t)hal_get_cycles();
    184 if( CONFIG_DEBUG_RPC_SEND < cycle )
    185 printk("\n[DBG] %s : thread % in cluster %x resume / rpc[%d] / cycle %d\n",
     174#if DEBUG_RPC_CLIENT_GENERIC
     175cycle = (uint32_t)hal_get_cycles();
     176if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     177printk("\n[DBG] %s : thread %x in cluster %x resumes / rpc[%d] / cycle %d\n",
    186178__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
    187179#endif
     
    190182        {
    191183
    192 #if CONFIG_DEBUG_RPC_SEND
    193 cycle = (uint32_t)hal_get_cycles();
    194 if( CONFIG_DEBUG_RPC_SEND < cycle )
    195 printk("\n[DBG] %s : thread %x in cluster %x deschedule / rpc[%d] / cycle %d\n",
     184#if DEBUG_RPC_CLIENT_GENERIC
     185cycle = (uint32_t)hal_get_cycles();
     186if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     187printk("\n[DBG] %s : thread %x in cluster %x blocks & deschedules / rpc[%d] / cycle %d\n",
    196188__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
    197189#endif
     
    199191            sched_yield("blocked on RPC");
    200192
    201 #if CONFIG_DEBUG_RPC_SEND
    202 cycle = (uint32_t)hal_get_cycles();
    203 if( CONFIG_DEBUG_RPC_SEND < cycle )
    204 printk("\n[DBG] %s : thread % in cluster %x resume / rpcr[%d] / cycle %d\n",
     193#if DEBUG_RPC_CLIENT_GENERIC
     194cycle = (uint32_t)hal_get_cycles();
     195if( DEBUG_RPC_CLIENT_GENERIC < cycle )
     196printk("\n[DBG] %s : thread %x in cluster %x resumes / rpcr[%d] / cycle %d\n",
    205197__FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
    206198#endif
     
    208200
    209201        // check response available
    210         assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" );
    211 
    212         // acknowledge the IPI sent by the server
    213         dev_pic_ack_ipi();
     202        assert( (rpc->responses == 0) , __FUNCTION__, "illegal RPC response\n" );
    214203    }
    215     else
     204    else  // non blocking RPC
    216205    {
    217206
    218 #if CONFIG_DEBUG_RPC_SEND
    219 cycle = (uint32_t)hal_get_cycles();
    220 if( CONFIG_DEBUG_RPC_SEND < cycle )
     207#if DEBUG_RPC_CLIENT_GENERIC
     208cycle = (uint32_t)hal_get_cycles();
     209if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    221210printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return  / cycle %d\n",
    222211__FUNCTION__, rpc->index, CURRENT_THREAD, cycle );
     
    244233        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    245234
    246 #if CONFIG_DEBUG_RPC_SERVER
     235#if DEBUG_RPC_SERVER_GENERIC
    247236uint32_t cycle = (uint32_t)hal_get_cycles();
    248 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     237if( DEBUG_RPC_SERVER_GENERIC < cycle )
    249238printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n",
    250239__FUNCTION__, this, local_cxy, cycle );
     
    254243        hal_disable_irq( &sr_save );
    255244
    256     // check RPC FIFO not empty and no RPC thread handling it 
     245    // activate (or create) RPC thread if RPC FIFO not empty 
    257246        if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    258247    {
    259         // search one non blocked RPC thread   
     248
     249#if DEBUG_RPC_SERVER_GENERIC
     250cycle = (uint32_t)hal_get_cycles();
     251if( DEBUG_RPC_SERVER_GENERIC < cycle )
     252printk("\n[DBG] %s : RPC FIFO non empty in cluster %x / cycle %d\n",
     253__FUNCTION__, local_cxy, cycle );
     254#endif
     255
     256        // search one IDLE RPC thread   
    260257        list_entry_t * iter;
    261258        LIST_FOREACH( &sched->k_root , iter )
    262259        {
    263260            thread = LIST_ELEMENT( iter , thread_t , sched_list );
    264             if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
     261            if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    265262            {
     263                // unblock found RPC thread
     264                thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
     265
     266                // exit loop
    266267                found = true;
    267268                break;
     
    279280                if( error )
    280281            {
    281                 printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",
    282                 __FUNCTION__ , local_cxy );
     282                assert( false , __FUNCTION__ ,
     283                "no memory to allocate a new RPC thread in cluster %x", local_cxy );
    283284            }
    284             else
    285             {
    286                 // unblock created RPC thread
    287                 thread->blocked = 0;
    288 
    289                 // update core descriptor counter 
    290                     hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    291 
    292 #if CONFIG_DEBUG_RPC_SERVER
    293 cycle = (uint32_t)hal_get_cycles();
    294 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     285
     286            // unblock created RPC thread
     287            thread->blocked = 0;
     288
     289            // update core descriptor counter 
     290            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
     291
     292#if DEBUG_RPC_SERVER_GENERIC
     293cycle = (uint32_t)hal_get_cycles();
     294if( DEBUG_RPC_SERVER_GENERIC < cycle )
    295295printk("\n[DBG] %s : create a new RPC thread %x in cluster %x / cycle %d\n",
    296296__FUNCTION__, thread, local_cxy, cycle );
    297297#endif
    298             }
    299298        }
    300299    }
    301300
    302 #if CONFIG_DEBUG_RPC_SERVER
    303 cycle = (uint32_t)hal_get_cycles();
    304 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     301#if DEBUG_RPC_SERVER_GENERIC
     302cycle = (uint32_t)hal_get_cycles();
     303if( DEBUG_RPC_SERVER_GENERIC < cycle )
    305304printk("\n[DBG] %s : interrupted thread %x deschedules in cluster %x / cycle %d\n",
    306305__FUNCTION__, this, local_cxy, cycle );
    307306#endif
    308307
    309     // interrupted thread deschedule always          
     308    // interrupted thread always deschedule         
    310309        sched_yield("IPI received");
    311310
    312 #if CONFIG_DEBUG_RPC_SERVER
    313 cycle = (uint32_t)hal_get_cycles();
    314 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     311#if DEBUG_RPC_SERVER_GENERIC
     312cycle = (uint32_t)hal_get_cycles();
     313if( DEBUG_RPC_SERVER_GENERIC < cycle )
    315314printk("\n[DBG] %s : interrupted thread %x resumes in cluster %x / cycle %d\n",
    316315__FUNCTION__, this, local_cxy, cycle );
     
    346345    // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
    347346 
    348         while(1)  // external loop
     347        while(1)  // infinite loop
    349348        {
    350349        // try to take RPC_FIFO ownership
     
    352351        {
    353352
    354 #if CONFIG_DEBUG_RPC_SERVER
     353#if DEBUG_RPC_SERVER_GENERIC
    355354uint32_t cycle = (uint32_t)hal_get_cycles();
    356 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     355if( DEBUG_RPC_SERVER_GENERIC < cycle )
    357356printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
    358357__FUNCTION__, this, local_cxy, cycle );
     
    360359            // initializes RPC requests counter
    361360            count = 0;
    362 
    363             // acknowledge local IPI
    364             dev_pic_ack_ipi();
    365361
    366362                    // exit internal loop in three cases:
     
    381377                    blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
    382378
    383 #if CONFIG_DEBUG_RPC_SERVER
    384 cycle = (uint32_t)hal_get_cycles();
    385 if( CONFIG_DEBUG_RPC_SERVER < cycle )
    386 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
    387 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
     379#if DEBUG_RPC_SERVER_GENERIC
     380cycle = (uint32_t)hal_get_cycles();
     381if( DEBUG_RPC_SERVER_GENERIC < cycle )
     382printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
     383__FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
    388384#endif
    389385                    // call the relevant server function
    390386                    rpc_server[index]( desc_xp );
    391387
    392 #if CONFIG_DEBUG_RPC_SERVER
    393 cycle = (uint32_t)hal_get_cycles();
    394 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     388#if DEBUG_RPC_SERVER_GENERIC
     389cycle = (uint32_t)hal_get_cycles();
     390if( DEBUG_RPC_SERVER_GENERIC < cycle )
    395391printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
    396 __FUNCTION__, this, local_cxy, index, cycle );
     392__FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
    397393#endif
    398394                    // increment handled RPCs counter
     
    403399                    {
    404400                        // decrement responses counter in RPC descriptor
    405                         hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);
     401                        hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
     402
     403                        // get client thread pointer and client core lid from RPC descriptor
     404                        thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
     405                        core_lid   = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    406406
    407407                        // unblock client thread
    408                         thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
    409                         thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );
     408                        thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );
    410409
    411410                        hal_fence();
    412411
    413                         // get client core lid and send IPI
    414                         core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
     412#if DEBUG_RPC_SERVER_GENERIC
     413cycle = (uint32_t)hal_get_cycles();
     414if( DEBUG_RPC_SERVER_GENERIC < cycle )
     415printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
     416__FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
     417#endif
     418                        // send IPI to client core
    415419                            dev_pic_send_ipi( desc_cxy , core_lid );
    416420                    }
     
    432436            {
    433437
    434 #if CONFIG_DEBUG_RPC_SERVER
     438#if DEBUG_RPC_SERVER_GENERIC
    435439uint32_t cycle = (uint32_t)hal_get_cycles();
    436 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     440if( DEBUG_RPC_SERVER_GENERIC < cycle )
    437441printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
    438442__FUNCTION__, this, local_cxy, cycle );
     
    447451            }
    448452
    449 #if CONFIG_DEBUG_RPC_SERVER
     453#if DEBUG_RPC_SERVER_GENERIC
    450454uint32_t cycle = (uint32_t)hal_get_cycles();
    451 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     455if( DEBUG_RPC_SERVER_GENERIC < cycle )
    452456printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
    453457__FUNCTION__, this, local_cxy, cycle );
    454458#endif
    455459
    456         // deschedule without blocking
     460        // Block and deschedule
     461        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
    457462        sched_yield("RPC fifo empty or too much work");
    458463
    459 #if CONFIG_DEBUG_RPC_SERVER
    460 cycle = (uint32_t)hal_get_cycles();
    461 if( CONFIG_DEBUG_RPC_SERVER < cycle )
     464#if DEBUG_RPC_SERVER_GENERIC
     465cycle = (uint32_t)hal_get_cycles();
     466if( DEBUG_RPC_SERVER_GENERIC < cycle )
    462467printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
    463468__FUNCTION__, this, local_cxy, cycle );
    464469#endif
    465470
    466         } // end external loop
     471        } // end infinite loop
    467472
    468473} // end rpc_thread_func()
     
    478483                                page_t  ** page )      // out
    479484{
    480 
    481     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    482 
    483     // initialise RPC descriptor header
    484     rpc_desc_t  rpc;
    485     rpc.index    = RPC_PMEM_GET_PAGES;
    486     rpc.response = 1;
    487     rpc.blocking = true;
     485#if DEBUG_RPC_PMEM_GET_PAGES
     486uint32_t cycle = (uint32_t)hal_get_cycles();
     487if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     488printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     489__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     490#endif
     491
     492    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     493
     494    // initialise RPC descriptor header
     495    rpc_desc_t  rpc;
     496    rpc.index     = RPC_PMEM_GET_PAGES;
     497    rpc.blocking  = true;
     498    rpc.responses = 1;
    488499
    489500    // set input arguments in RPC descriptor
     
    496507    *page = (page_t *)(intptr_t)rpc.args[1];
    497508
     509#if DEBUG_RPC_PMEM_GET_PAGES
     510cycle = (uint32_t)hal_get_cycles();
     511if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     512printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     513__FUNCTION__ , CURRENT_THREAD , cycle );
     514#endif
    498515}
    499516
     
    501518void rpc_pmem_get_pages_server( xptr_t xp )
    502519{
     520#if DEBUG_RPC_PMEM_GET_PAGES
     521uint32_t cycle = (uint32_t)hal_get_cycles();
     522if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     523printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     524__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     525#endif
    503526
    504527    // get client cluster identifier and pointer on RPC descriptor
     
    515538    hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    516539
     540#if DEBUG_RPC_PMEM_GET_PAGES
     541cycle = (uint32_t)hal_get_cycles();
     542if( cycle > DEBUG_RPC_PMEM_GET_PAGES )
     543printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     544__FUNCTION__ , CURRENT_THREAD , cycle );
     545#endif
    517546}
    518547
     
    525554                                    page_t  * page )      // out
    526555{
     556#if DEBUG_RPC_PMEM_RELEASE_PAGES
     557uint32_t cycle = (uint32_t)hal_get_cycles();
     558if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     559printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     560__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     561#endif
    527562
    528563    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    531566    rpc_desc_t  rpc;
    532567    rpc.index    = RPC_PMEM_RELEASE_PAGES;
    533     rpc.response = 1;
    534     rpc.blocking = true;
     568    rpc.blocking = true;
     569    rpc.responses = 1;
    535570
    536571    // set input arguments in RPC descriptor
     
    540575    rpc_send( cxy , &rpc );
    541576
     577#if DEBUG_RPC_PMEM_RELEASE_PAGES
     578cycle = (uint32_t)hal_get_cycles();
     579if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     580printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     581__FUNCTION__ , CURRENT_THREAD , cycle );
     582#endif
    542583}
    543584
     
    545586void rpc_pmem_release_pages_server( xptr_t xp )
    546587{
     588#if DEBUG_RPC_PMEM_RELEASE_PAGES
     589uint32_t cycle = (uint32_t)hal_get_cycles();
     590if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     591printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     592__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     593#endif
    547594
    548595    // get client cluster identifier and pointer on RPC descriptor
     
    559606    kmem_free( &req );
    560607
     608#if DEBUG_RPC_PMEM_RELEASE_PAGES
     609cycle = (uint32_t)hal_get_cycles();
     610if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES )
     611printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     612__FUNCTION__ , CURRENT_THREAD , cycle );
     613#endif
    561614}
    562615
     
    577630                                   error_t   * error )              // out
    578631{
     632#if DEBUG_RPC_PROCESS_MAKE_FORK
     633uint32_t cycle = (uint32_t)hal_get_cycles();
     634if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     635printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     636__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     637#endif
     638
    579639    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    580640
     
    582642    rpc_desc_t  rpc;
    583643    rpc.index    = RPC_PROCESS_MAKE_FORK;
    584     rpc.response = 1;
    585     rpc.blocking = true;
     644    rpc.blocking = true;
     645    rpc.responses = 1;
    586646
    587647    // set input arguments in RPC descriptor 
     
    597657    *error             = (error_t)rpc.args[4];     
    598658
     659#if DEBUG_RPC_PROCESS_MAKE_FORK
     660cycle = (uint32_t)hal_get_cycles();
     661if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     662printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     663__FUNCTION__ , CURRENT_THREAD , cycle );
     664#endif
    599665}
    600666
     
    602668void rpc_process_make_fork_server( xptr_t xp )
    603669{
     670#if DEBUG_RPC_PROCESS_MAKE_FORK
     671uint32_t cycle = (uint32_t)hal_get_cycles();
     672if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     673printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     674__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     675#endif
    604676
    605677    xptr_t     ref_process_xp;     // extended pointer on reference parent process
     
    628700    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    629701
     702#if DEBUG_RPC_PROCESS_MAKE_FORK
     703cycle = (uint32_t)hal_get_cycles();
     704if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK )
     705printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     706__FUNCTION__ , CURRENT_THREAD , cycle );
     707#endif
    630708}
    631709
     
    656734    rpc_desc_t  rpc;
    657735    rpc.index    = RPC_THREAD_USER_CREATE;
    658     rpc.response = 1;
    659     rpc.blocking = true;
     736    rpc.blocking = true;
     737    rpc.responses = 1;
    660738
    661739    // set input arguments in RPC descriptor
     
    690768    // get client cluster identifier and pointer on RPC descriptor
    691769    cxy_t        client_cxy  = GET_CXY( xp );
    692     rpc_desc_t * desc = GET_PTR( xp );
     770    rpc_desc_t * desc        = GET_PTR( xp );
    693771
    694772    // get pointer on attributes structure in client cluster from RPC descriptor
     
    736814    rpc_desc_t  rpc;
    737815    rpc.index    = RPC_THREAD_KERNEL_CREATE;
    738     rpc.response = 1;
    739     rpc.blocking = true;
     816    rpc.blocking = true;
     817    rpc.responses = 1;
    740818
    741819    // set input arguments in RPC descriptor
     
    763841    // get client cluster identifier and pointer on RPC descriptor
    764842    cxy_t        client_cxy  = GET_CXY( xp );
    765     rpc_desc_t * desc = GET_PTR( xp );
     843    rpc_desc_t * desc        = GET_PTR( xp );
    766844
    767845    // get attributes from RPC descriptor
     
    797875{
    798876
    799 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     877#if DEBUG_RPC_PROCESS_SIGACTION
    800878uint32_t  cycle  = (uint32_t)hal_get_cycles();
    801879uint32_t  action = rpc->args[0];
    802880pid_t     pid    = rpc->args[1];
    803 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     881if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    804882printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
    805883__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     
    813891    rpc_send( cxy , rpc );
    814892
    815 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
    816 cycle = (uint32_t)hal_get_cycles();
    817 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     893#if DEBUG_RPC_PROCESS_SIGACTION
     894cycle = (uint32_t)hal_get_cycles();
     895if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    818896printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n",
    819897__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
     
    842920    pid      = (pid_t)   hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );
    843921
    844 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
     922#if DEBUG_RPC_PROCESS_SIGACTION
    845923uint32_t cycle = (uint32_t)hal_get_cycles();
    846 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     924if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    847925printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
    848926__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     
    858936
    859937    // build extended pointer on response counter in RPC
    860     count_xp = XPTR( client_cxy , &rpc->response );
     938    count_xp = XPTR( client_cxy , &rpc->responses );
    861939
    862940    // decrement the responses counter in RPC descriptor,
     
    872950    }
    873951
    874 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
    875 cycle = (uint32_t)hal_get_cycles();
    876 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
     952#if DEBUG_RPC_PROCESS_SIGACTION
     953cycle = (uint32_t)hal_get_cycles();
     954if( DEBUG_RPC_PROCESS_SIGACTION < cycle )
    877955printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
    878956__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
     
    903981    rpc_desc_t  rpc;
    904982    rpc.index    = RPC_VFS_INODE_CREATE;
    905     rpc.response = 1;
    906     rpc.blocking = true;
     983    rpc.blocking = true;
     984    rpc.responses = 1;
    907985
    908986    // set input arguments in RPC descriptor
     
    9831061    rpc_desc_t  rpc;
    9841062    rpc.index    = RPC_VFS_INODE_DESTROY;
    985     rpc.response = 1;
    986     rpc.blocking = true;
     1063    rpc.blocking = true;
     1064    rpc.responses = 1;
    9871065
    9881066    // set input arguments in RPC descriptor
     
    10231101                                   error_t              * error )       // out
    10241102{
    1025     RPC_DEBUG_ENTER
     1103#if DEBUG_RPC_VFS_DENTRY_CREATE
     1104uint32_t cycle = (uint32_t)hal_get_cycles();
     1105if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1106printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1107__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1108#endif
    10261109
    10271110    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    10301113    rpc_desc_t  rpc;
    10311114    rpc.index    = RPC_VFS_DENTRY_CREATE;
    1032     rpc.response = 1;
    1033     rpc.blocking = true;
     1115    rpc.blocking = true;
     1116    rpc.responses = 1;
    10341117
    10351118    // set input arguments in RPC descriptor
     
    10451128    *error     = (error_t)rpc.args[4];
    10461129
    1047     RPC_DEBUG_EXIT
     1130#if DEBUG_RPC_VFS_DENTRY_CREATE
     1131cycle = (uint32_t)hal_get_cycles();
     1132if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1133printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1134__FUNCTION__ , CURRENT_THREAD , cycle );
     1135#endif
    10481136}
    10491137
     
    10511139void rpc_vfs_dentry_create_server( xptr_t xp )
    10521140{
     1141#if DEBUG_RPC_VFS_DENTRY_CREATE
     1142uint32_t cycle = (uint32_t)hal_get_cycles();
     1143if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1144printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1145__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1146#endif
     1147
    10531148    uint32_t      type;
    10541149    char        * name;
     
    10561151    xptr_t        dentry_xp;
    10571152    error_t       error;
    1058 
    1059     RPC_DEBUG_ENTER
    1060 
    10611153    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    10621154
     
    10831175    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    10841176
    1085     RPC_DEBUG_EXIT
     1177#if DEBUG_RPC_VFS_DENTRY_CREATE
     1178cycle = (uint32_t)hal_get_cycles();
     1179if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE )
     1180printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1181__FUNCTION__ , CURRENT_THREAD , cycle );
     1182#endif
    10861183}
    10871184
     
    11001197    rpc_desc_t  rpc;
    11011198    rpc.index    = RPC_VFS_DENTRY_DESTROY;
    1102     rpc.response = 1;
    1103     rpc.blocking = true;
     1199    rpc.blocking = true;
     1200    rpc.responses = 1;
    11041201
    11051202    // set input arguments in RPC descriptor
     
    11401237                                 error_t              * error )      // out
    11411238{
     1239#if DEBUG_RPC_VFS_FILE_CREATE
     1240uint32_t cycle = (uint32_t)hal_get_cycles();
     1241if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1242printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1243__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1244#endif
     1245
    11421246    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    11431247
     
    11451249    rpc_desc_t  rpc;
    11461250    rpc.index    = RPC_VFS_FILE_CREATE;
    1147     rpc.response = 1;
    1148     rpc.blocking = true;
     1251    rpc.blocking = true;
     1252    rpc.responses = 1;
    11491253
    11501254    // set input arguments in RPC descriptor
     
    11591263    *error   = (error_t)rpc.args[3];
    11601264
     1265#if DEBUG_RPC_VFS_FILE_CREATE
     1266cycle = (uint32_t)hal_get_cycles();
     1267if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1268printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1269__FUNCTION__ , CURRENT_THREAD , cycle );
     1270#endif
    11611271}
    11621272
     
    11641274void rpc_vfs_file_create_server( xptr_t xp )
    11651275{
     1276#if DEBUG_RPC_VFS_FILE_CREATE
     1277uint32_t cycle = (uint32_t)hal_get_cycles();
     1278if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1279printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1280__FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1281#endif
     1282
    11661283    uint32_t      file_attr;
    11671284    vfs_inode_t * inode;
     
    11861303    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    11871304
     1305#if DEBUG_RPC_VFS_FILE_CREATE
     1306cycle = (uint32_t)hal_get_cycles();
     1307if( cycle > DEBUG_RPC_VFS_FILE_CREATE )
     1308printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1309__FUNCTION__ , CURRENT_THREAD , cycle );
     1310#endif
    11881311}
    11891312
     
    12011324    rpc_desc_t  rpc;
    12021325    rpc.index    = RPC_VFS_FILE_DESTROY;
    1203     rpc.response = 1;
    1204     rpc.blocking = true;
     1326    rpc.blocking = true;
     1327    rpc.responses = 1;
    12051328
    12061329    // set input arguments in RPC descriptor
     
    12451368    rpc_desc_t  rpc;
    12461369    rpc.index    = RPC_VFS_INODE_LOAD;
    1247     rpc.response = 1;
    1248     rpc.blocking = true;
     1370    rpc.blocking = true;
     1371    rpc.responses = 1;
    12491372
    12501373    // set input arguments in RPC descriptor
     
    13061429    rpc_desc_t  rpc;
    13071430    rpc.index    = RPC_VFS_MAPPER_LOAD_ALL;
    1308     rpc.response = 1;
    1309     rpc.blocking = true;
     1431    rpc.blocking = true;
     1432    rpc.responses = 1;
    13101433
    13111434    // set input arguments in RPC descriptor
     
    13581481    rpc_desc_t  rpc;
    13591482    rpc.index    = RPC_FATFS_GET_CLUSTER;
    1360     rpc.response = 1;
    1361     rpc.blocking = true;
     1483    rpc.blocking = true;
     1484    rpc.responses = 1;
    13621485
    13631486    // set input arguments in RPC descriptor
     
    13861509    // get client cluster identifier and pointer on RPC descriptor
    13871510    cxy_t        client_cxy  = GET_CXY( xp );
    1388     rpc_desc_t * desc = GET_PTR( xp );
     1511    rpc_desc_t * desc        = GET_PTR( xp );
    13891512
    13901513    // get input arguments
     
    14181541    rpc_desc_t  rpc;
    14191542    rpc.index    = RPC_VMM_GET_VSEG;
    1420     rpc.response = 1;
    1421     rpc.blocking = true;
     1543    rpc.blocking = true;
     1544    rpc.responses = 1;
    14221545
    14231546    // set input arguments in RPC descriptor
     
    14801603    rpc_desc_t  rpc;
    14811604    rpc.index    = RPC_VMM_GET_PTE;
    1482     rpc.response = 1;
    1483     rpc.blocking = true;
     1605    rpc.blocking = true;
     1606    rpc.responses = 1;
    14841607
    14851608    // set input arguments in RPC descriptor
     
    15411664    rpc_desc_t  rpc;
    15421665    rpc.index    = RPC_THREAD_USER_CREATE;
    1543     rpc.response = 1;
    1544     rpc.blocking = true;
     1666    rpc.blocking = true;
     1667    rpc.responses = 1;
    15451668
    15461669    // set input arguments in RPC descriptor
     
    15601683    // get client cluster identifier and pointer on RPC descriptor
    15611684    cxy_t        client_cxy  = GET_CXY( xp );
    1562     rpc_desc_t * desc = GET_PTR( xp );
     1685    rpc_desc_t * desc        = GET_PTR( xp );
    15631686
    15641687    // get input argument "kmem_type" from client RPC descriptor
     
    15911714    rpc_desc_t  rpc;
    15921715    rpc.index    = RPC_THREAD_USER_CREATE;
    1593     rpc.response = 1;
    1594     rpc.blocking = true;
     1716    rpc.blocking = true;
     1717    rpc.responses = 1;
    15951718
    15961719    // set input arguments in RPC descriptor
     
    16081731    // get client cluster identifier and pointer on RPC descriptor
    16091732    cxy_t        client_cxy  = GET_CXY( xp );
    1610     rpc_desc_t * desc = GET_PTR( xp );
     1733    rpc_desc_t * desc        = GET_PTR( xp );
    16111734
    16121735    // get input arguments "buf" and "kmem_type" from client RPC descriptor
     
    16411764    rpc_desc_t  rpc;
    16421765    rpc.index    = RPC_MAPPER_MOVE_BUFFER;
    1643     rpc.response = 1;
    1644     rpc.blocking = true;
     1766    rpc.blocking = true;
     1767    rpc.responses = 1;
    16451768
    16461769    // set input arguments in RPC descriptor
     
    17251848    rpc_desc_t  rpc;
    17261849    rpc.index    = RPC_MAPPER_GET_PAGE;
    1727     rpc.response = 1;
    1728     rpc.blocking = true;
     1850    rpc.blocking = true;
     1851    rpc.responses = 1;
    17291852
    17301853    // set input arguments in RPC descriptor
     
    17801903    rpc_desc_t  rpc;
    17811904    rpc.index    = RPC_VMM_CREATE_VSEG;
    1782     rpc.response = 1;
    1783     rpc.blocking = true;
     1905    rpc.blocking = true;
     1906    rpc.responses = 1;
    17841907
    17851908    // set input arguments in RPC descriptor
     
    18461969    rpc_desc_t  rpc;
    18471970    rpc.index    = RPC_SCHED_DISPLAY;
    1848     rpc.response = 1;
    1849     rpc.blocking = true;
     1971    rpc.blocking = true;
     1972    rpc.responses = 1;
    18501973
    18511974    // set input arguments in RPC descriptor
     
    18852008    rpc_desc_t  rpc;
    18862009    rpc.index    = RPC_VMM_SET_COW;
    1887     rpc.response = 1;
    1888     rpc.blocking = true;
     2010    rpc.blocking = true;
     2011    rpc.responses = 1;
    18892012
    18902013    // set input arguments in RPC descriptor
     
    19272050    rpc_desc_t  rpc;
    19282051    rpc.index    = RPC_VMM_DISPLAY;
    1929     rpc.response = 1;
    1930     rpc.blocking = true;
     2052    rpc.blocking = true;
     2053    rpc.responses = 1;
    19312054
    19322055    // set input arguments in RPC descriptor
  • trunk/kernel/kern/rpc.h

    r437 r438  
    111111{
    112112        rpc_index_t         index;       /*! index of requested RPC service           */
    113         volatile uint32_t   response;    /*! all responses received when 0            */
     113        volatile uint32_t   responses;   /*! number of expected responses             */
    114114    struct thread_s   * thread;      /*! local pointer on client thread           */
    115115    uint32_t            lid;         /*! index of core running the calling thread */
     
    150150
    151151/***********************************************************************************
    152  * This function is the entry point for RPC handling on the server side.
    153  * It is executed by a core receiving an IPI, and each time the core enters,
    154  * or exit the kernel to handle .
    155  * It does nothing and return if the RPC_FIFO is empty.
    156  * The calling thread checks if it exist at least one non-blocked RPC thread,
    157  * creates a new RPC if required, and deschedule to allow the RPC thead to execute.
     152 * This function is the entry point for RPC handling on the server cluster.
     153 * It is executed by the core receiving the IPI sent by the client thread.
     154 * - If the RPC FIFO is empty, it deschedules.
     155 * - If the RPC FIFO is not empty, it checks if it exist a non-blocked RPC thread
     156 *   in the cluster, creates a new one if required, and deschedule to allow
     157 *   the RPC thead to execute.
    158158 **********************************************************************************/
    159159void rpc_check();
  • trunk/kernel/kern/scheduler.c

    r437 r438  
    125125            thread = LIST_ELEMENT( current , thread_t , sched_list );
    126126
    127             // analyse kernel thread type
    128             switch( thread->type )
     127            // execute RPC thread if non blocked
     128            if( (thread->blocked == 0)  &&
     129                (thread->type == THREAD_RPC) )
    129130            {
    130                 case THREAD_RPC:  // if non blocked and RPC FIFO non-empty
    131                 if( (thread->blocked == 0) &&
    132                     (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
    133                 {
    134                     spinlock_unlock( &sched->lock );
    135                     return thread;
    136                 }
    137                 break;
    138 
    139                 case THREAD_DEV:  // if non blocked and waiting queue non empty
    140                 if( (thread->blocked == 0) &&
    141                     (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
    142                 {
    143                     spinlock_unlock( &sched->lock );
    144                     return thread;
    145                 }
    146                 break;
    147 
    148                 default:
    149                 break;
     131                spinlock_unlock( &sched->lock );
     132                return thread;
     133            }
     134
     135            // execute DEV thread if non blocked and waiting queue non empty
     136            if( (thread->blocked == 0)  &&
     137                (thread->type == THREAD_DEV) &&
     138                (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) )
     139            {
     140                spinlock_unlock( &sched->lock );
     141                return thread;
    150142            }
    151143        } // end loop on kernel threads
     
    174166            thread = LIST_ELEMENT( current , thread_t , sched_list );
    175167
    176             // return thread if runnable
     168            // return thread if non blocked
    177169            if( thread->blocked == 0 )
    178170            {
     
    227219            process = thread->process;
    228220
    229 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     221#if DEBUG_SCHED_HANDLE_SIGNALS
    230222uint32_t cycle = (uint32_t)hal_get_cycles();
    231 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     223if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    232224printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n",
    233225__FUNCTION__ , thread , process->pid , cycle );
     
    250242            thread_destroy( thread );
    251243
    252 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     244#if DEBUG_SCHED_HANDLE_SIGNALS
    253245cycle = (uint32_t)hal_get_cycles();
    254 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     246if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    255247printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n",
    256248__FUNCTION__ , thread , process->pid , cycle );
     
    262254                process_destroy( process );
    263255
    264 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS
     256#if DEBUG_SCHED_HANDLE_SIGNALS
    265257cycle = (uint32_t)hal_get_cycles();
    266 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     258if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    267259printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
    268260__FUNCTION__ , process->pid , cycle );
     
    287279    scheduler_t * sched   = &core->scheduler;
    288280 
    289 #if (CONFIG_DEBUG_SCHED_YIELD & 0x1)
    290 if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
     281#if (DEBUG_SCHED_YIELD & 0x1)
     282if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
    291283sched_display( core->lid );
    292284#endif
     
    322314    {
    323315
    324 #if CONFIG_DEBUG_SCHED_YIELD
     316#if DEBUG_SCHED_YIELD
    325317uint32_t cycle = (uint32_t)hal_get_cycles();
    326 if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     318if( DEBUG_SCHED_YIELD < cycle )
    327319printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    328320"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
     
    350342    {
    351343
    352 #if (CONFIG_DEBUG_SCHED_YIELD & 1)
     344#if (DEBUG_SCHED_YIELD & 1)
    353345uint32_t cycle = (uint32_t)hal_get_cycles();
    354 if( CONFIG_DEBUG_SCHED_YIELD < cycle )
     346if( DEBUG_SCHED_YIELD < cycle )
    355347printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    356348"      thread %x (%s) (%x,%x) continue / cycle %d\n",
  • trunk/kernel/kern/thread.c

    r436 r438  
    112112/////////////////////////////////////////////////////////////////////////////////////
    113113// This static function initializes a thread descriptor (kernel or user).
    114 // It can be called by the three functions:
     114// It can be called by the four functions:
    115115// - thread_user_create()
    116116// - thread_user_fork()
    117117// - thread_kernel_create()
     118// - thread_idle_init()
     119// It updates the local DQDT.
    118120/////////////////////////////////////////////////////////////////////////////////////
    119121// @ thread       : pointer on thread descriptor
     
    202204    thread->save_sr = 0xFF13;
    203205
    204     // update local DQDT
    205     dqdt_local_update_threads( 1 );
    206 
    207206    // register new thread in core scheduler
    208207    sched_register_thread( thread->core , thread );
     208
     209        // update DQDT
     210    dqdt_update_threads( 1 );
    209211
    210212        return 0;
     
    227229    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
    228230
    229 #if CONFIG_DEBUG_THREAD_USER_CREATE
     231#if DEBUG_THREAD_USER_CREATE
    230232uint32_t cycle = (uint32_t)hal_get_cycles();
    231 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     233if( DEBUG_THREAD_USER_CREATE < cycle )
    232234printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
    233235__FUNCTION__, CURRENT_THREAD, pid , cycle );
     
    326328    }
    327329
    328         // update DQDT for new thread
    329     dqdt_local_update_threads( 1 );
    330 
    331 #if CONFIG_DEBUG_THREAD_USER_CREATE
     330#if DEBUG_THREAD_USER_CREATE
    332331cycle = (uint32_t)hal_get_cycles();
    333 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle )
     332if( DEBUG_THREAD_USER_CREATE < cycle )
    334333printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n",
    335334__FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle );
     
    366365    vseg_t       * vseg;             // child thread STACK vseg
    367366
    368 #if CONFIG_DEBUG_THREAD_USER_FORK
     367#if DEBUG_THREAD_USER_FORK
    369368uint32_t cycle = (uint32_t)hal_get_cycles();
    370 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     369if( DEBUG_THREAD_USER_FORK < cycle )
    371370printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n",
    372371__FUNCTION__, CURRENT_THREAD, child_process->pid, cycle );
     
    493492            hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    494493
    495 #if (CONFIG_DEBUG_THREAD_USER_FORK & 1)
     494#if (DEBUG_THREAD_USER_FORK & 1)
    496495cycle = (uint32_t)hal_get_cycles();
    497 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     496if( DEBUG_THREAD_USER_FORK < cycle )
    498497printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n",
    499498__FUNCTION__, CURRENT_THREAD, vpn );
     
    508507                     vpn_size );
    509508 
    510         // update DQDT for child thread
    511     dqdt_local_update_threads( 1 );
    512 
    513 #if CONFIG_DEBUG_THREAD_USER_FORK
     509#if DEBUG_THREAD_USER_FORK
    514510cycle = (uint32_t)hal_get_cycles();
    515 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle )
     511if( DEBUG_THREAD_USER_FORK < cycle )
    516512printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n",
    517513__FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle );
     
    538534            __FUNCTION__ , "illegal core_lid" );
    539535
    540 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     536#if DEBUG_THREAD_KERNEL_CREATE
    541537uint32_t cycle = (uint32_t)hal_get_cycles();
    542 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     538if( DEBUG_THREAD_KERNEL_CREATE < cycle )
    543539printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n",
    544540__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
     
    568564        hal_cpu_context_create( thread );
    569565
    570         // update DQDT for kernel thread
    571     dqdt_local_update_threads( 1 );
    572 
    573 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE
     566#if DEBUG_THREAD_KERNEL_CREATE
    574567cycle = (uint32_t)hal_get_cycles();
    575 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle )
     568if( DEBUG_THREAD_KERNEL_CREATE < cycle )
    576569printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n",
    577570__FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle );
     
    583576} // end thread_kernel_create()
    584577
    585 ///////////////////////////////////////////////////
    586 error_t thread_kernel_init( thread_t      * thread,
    587                             thread_type_t   type,
    588                             void          * func,
    589                             void          * args,
    590                                             lid_t           core_lid )
     578/////////////////////////////////////////////////
     579error_t thread_idle_init( thread_t      * thread,
     580                          thread_type_t   type,
     581                          void          * func,
     582                          void          * args,
     583                                          lid_t           core_lid )
    591584{
    592585    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
     
    607600    return error;
    608601
    609 }  // end thread_kernel_init()
     602}  // end thread_idle_init()
    610603
    611604///////////////////////////////////////////////////////////////////////////////////////
     
    620613    core_t     * core       = thread->core;
    621614
    622 #if CONFIG_DEBUG_THREAD_DESTROY
     615#if DEBUG_THREAD_DESTROY
    623616uint32_t cycle = (uint32_t)hal_get_cycles();
    624 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     617if( DEBUG_THREAD_DESTROY < cycle )
    625618printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
    626619__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     
    652645    process_remove_thread( thread );
    653646       
    654     // update local DQDT
    655     dqdt_local_update_threads( -1 );
     647    // update DQDT
     648    dqdt_update_threads( -1 );
    656649
    657650    // invalidate thread descriptor
     
    661654    thread_release( thread );
    662655
    663 #if CONFIG_DEBUG_THREAD_DESTROY
     656#if DEBUG_THREAD_DESTROY
    664657cycle = (uint32_t)hal_get_cycles();
    665 if( CONFIG_DEBUG_THREAD_DESTROY < cycle )
     658if( DEBUG_THREAD_DESTROY < cycle )
    666659printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
    667660__FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
     
    811804    hal_fence();
    812805
    813 #if CONFIG_DEBUG_THREAD_BLOCK
     806#if DEBUG_THREAD_BLOCK
    814807uint32_t cycle = (uint32_t)hal_get_cycles();
    815 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     808if( DEBUG_THREAD_BLOCK < cycle )
    816809printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n",
    817810__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
    818811#endif
    819812
    820 #if (CONFIG_DEBUG_THREAD_BLOCK & 1)
    821 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     813#if (DEBUG_THREAD_BLOCK & 1)
     814if( DEBUG_THREAD_BLOCK < cycle )
    822815sched_display( ptr->core->lid );
    823816#endif
     
    837830    hal_fence();
    838831
    839 #if CONFIG_DEBUG_THREAD_BLOCK
     832#if DEBUG_THREAD_BLOCK
    840833uint32_t cycle = (uint32_t)hal_get_cycles();
    841 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     834if( DEBUG_THREAD_BLOCK < cycle )
    842835printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n",
    843836__FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle );
    844837#endif
    845838
    846 #if (CONFIG_DEBUG_THREAD_BLOCK & 1)
    847 if( CONFIG_DEBUG_THREAD_BLOCK < cycle )
     839#if (DEBUG_THREAD_BLOCK & 1)
     840if( DEBUG_THREAD_BLOCK < cycle )
    848841sched_display( ptr->core->lid );
    849842#endif
     
    890883    killer_xp  = XPTR( local_cxy , killer_ptr );
    891884
    892 #if CONFIG_DEBUG_THREAD_KILL
     885#if DEBUG_THREAD_KILL
    893886uint32_t cycle  = (uint32_t)hal_get_cycles;
    894 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     887if( DEBUG_THREAD_KILL < cycle )
    895888printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
    896889__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    989982        else          hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
    990983
    991 #if CONFIG_DEBUG_THREAD_KILL
     984#if DEBUG_THREAD_KILL
    992985cycle  = (uint32_t)hal_get_cycles;
    993 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     986if( DEBUG_THREAD_KILL < cycle )
    994987printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
    995988__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    1002995        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    1003996
    1004 #if CONFIG_DEBUG_THREAD_KILL
     997#if DEBUG_THREAD_KILL
    1005998cycle  = (uint32_t)hal_get_cycles;
    1006 if( CONFIG_DEBUG_THREAD_KILL < cycle )
     999if( DEBUG_THREAD_KILL < cycle )
    10071000printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
    10081001__FUNCTION__, killer_ptr, target_ptr, cycle );
     
    10241017        {
    10251018
    1026 #if CONFIG_DEBUG_THREAD_IDLE
     1019#if DEBUG_THREAD_IDLE
    10271020uint32_t cycle  = (uint32_t)hal_get_cycles;
    10281021thread_t * this = CURRENT_THREAD;
    1029 if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     1022if( DEBUG_THREAD_IDLE < cycle )
    10301023printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n",
    10311024__FUNCTION__, this, local_cxy, this->core->lid, cycle );
     
    10341027            hal_core_sleep();
    10351028
    1036 #if CONFIG_DEBUG_THREAD_IDLE
     1029#if DEBUG_THREAD_IDLE
    10371030cycle  = (uint32_t)hal_get_cycles;
    1038 if( CONFIG_DEBUG_THREAD_IDLE < cycle )
     1031if( DEBUG_THREAD_IDLE < cycle )
    10391032printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n",
    10401033__FUNCTION__, this, local_cxy, this->core->lid, cycle );
  • trunk/kernel/kern/thread.h

    r437 r438  
    8787#define THREAD_BLOCKED_SEM       0x0020  /*! thread wait semaphore                    */
    8888#define THREAD_BLOCKED_PAGE      0x0040  /*! thread wait page access                  */
     89#define THREAD_BLOCKED_IDLE      0x0080  /*! thread RPC wait RPC_FIFO non empty       */
    8990#define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait (cond/mutex/barrier)         */
    9091#define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion               */
     
    286287
    287288/***************************************************************************************
    288  * This function initializes an existing thread descriptor from arguments values.
     289 * This function is called by the kernel_init() function to initialize the IDLE thread.
     290 * It initializes an existing thread descriptor from arguments values.
    289291 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
    290  * It is called by the kernel_init() function to initialize the IDLE thread.
    291292 ***************************************************************************************
    292293 * @ thread   : pointer on existing thread descriptor.
     
    297298 * @ returns 0 if success / returns EINVAL if error
    298299 **************************************************************************************/
    299 error_t thread_kernel_init( thread_t      * thread,
    300                             thread_type_t   type,
    301                             void          * func,
    302                             void          * args,
    303                             lid_t           core_lid );
     300error_t thread_idle_init( thread_t      * thread,
     301                          thread_type_t   type,
     302                          void          * func,
     303                          void          * args,
     304                          lid_t           core_lid );
    304305
    305306/***************************************************************************************
  • trunk/kernel/libk/elf.c

    r433 r438  
    201201                vfs_file_count_up( file_xp );
    202202
    203 #if CONFIG_DEBUG_ELF_LOAD
     203#if DEBUG_ELF_LOAD
    204204uint32_t cycle = (uint32_t)hal_get_cycles();
    205 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     205if( DEBUG_ELF_LOAD < cycle )
    206206printk("\n[DBG] %s : found %s vseg / base %x / size %x\n"
    207207"  file_size %x / file_offset %x / mapper_xp %l / cycle %d\n",
     
    228228        error_t      error;
    229229
    230 #if CONFIG_DEBUG_ELF_LOAD
     230#if DEBUG_ELF_LOAD
    231231uint32_t cycle = (uint32_t)hal_get_cycles();
    232 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     232if( DEBUG_ELF_LOAD < cycle )
    233233printk("\n[DBG] %s : thread %d enter for <%s> / cycle %d\n",
    234234__FUNCTION__, CURRENT_THREAD, pathname, cycle );
     
    252252        }
    253253
    254 #if (CONFIG_DEBUG_ELF_LOAD & 1)
    255 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     254#if (DEBUG_ELF_LOAD & 1)
     255if( DEBUG_ELF_LOAD < cycle )
    256256printk("\n[DBG] %s : open file <%s>\n", __FUNCTION__, pathname );
    257257#endif
     
    268268        }
    269269
    270 #if (CONFIG_DEBUG_ELF_LOAD & 1)
    271 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     270#if (DEBUG_ELF_LOAD & 1)
     271if( DEBUG_ELF_LOAD < cycle )
    272272printk("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname );
    273273#endif
     
    308308        }
    309309
    310 #if (CONFIG_DEBUG_ELF_LOAD & 1)
    311 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     310#if (DEBUG_ELF_LOAD & 1)
     311if( DEBUG_ELF_LOAD < cycle )
    312312printk("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname );
    313313#endif
     
    328328        }
    329329
    330 #if (CONFIG_DEBUG_ELF_LOAD & 1)
    331 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     330#if (DEBUG_ELF_LOAD & 1)
     331if( DEBUG_ELF_LOAD < cycle )
    332332printk("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname );
    333333#endif
     
    356356        kmem_free(&req);
    357357
    358 #if CONFIG_DEBUG_ELF_LOAD
     358#if DEBUG_ELF_LOAD
    359359cycle = (uint32_t)hal_get_cycles();
    360 if( CONFIG_DEBUG_ELF_LOAD < cycle )
     360if( DEBUG_ELF_LOAD < cycle )
    361361printk("\n[DBG] %s : thread %d exit for <%s> / entry_point %x / cycle %d\n",
    362362__FUNCTION__, CURRENT_THREAD, pathname, header.e_entry, cycle );
  • trunk/kernel/libk/remote_rwlock.c

    r436 r438  
    4141    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
    4242
    43 #if CONFIG_DEBUG_REMOTE_RWLOCKS
     43#if DEBUG_REMOTE_RWLOCKS
    4444hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
    4545xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
     
    8686    thread_ptr->remote_locks++;
    8787
    88 #if CONFIG_DEBUG_REMOTE_RWLOCKS
     88#if DEBUG_REMOTE_RWLOCKS
    8989xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
    9090                 XPTR( lock_cxy ,  &lock_ptr->list ) );
     
    126126        thread_ptr->remote_locks--;
    127127
    128 #if CONFIG_DEBUG_REMOTE_RWLOCKS
     128#if DEBUG_REMOTE_RWLOCKS
    129129xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    130130#endif
     
    176176    }
    177177
    178 #if CONFIG_DEBUG_REMOTE_RWLOCKS
     178#if DEBUG_REMOTE_RWLOCKS
    179179hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    180180                XPTR( local_cxy , thread_ptr ) );
  • trunk/kernel/libk/remote_rwlock.h

    r436 r438  
    4848    uint32_t       count;           /*! current number of reader threads              */
    4949
    50 #if CONFIG_DEBUG_REMOTE_RWLOCKS
     50#if DEBUG_REMOTE_RWLOCKS
    5151    xptr_t         owner;           /*! extended pointer on writer thread             */
    5252    xlist_entry_t  list;            /*! member of list of remote locks taken by owner */
  • trunk/kernel/libk/remote_spinlock.c

    r436 r438  
    3838        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
    3939
    40 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     40#if DEBUG_REMOTE_SPINLOCKS
    4141hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    4242xlist_entry_init( XPTR( cxy , &ptr->list ) );
     
    7575                thread_ptr->remote_locks++;
    7676
    77 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     77#if DEBUG_REMOTE_SPINLOCKS
    7878hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    7979                XPTR( local_cxy , thread_ptr) );
     
    120120        thread_ptr->remote_locks++;
    121121
    122 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     122#if DEBUG_REMOTE_SPINLOCKS
    123123hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
    124124                XPTR( local_cxy , thread_ptr) );
     
    143143        thread_t          * thread_ptr = CURRENT_THREAD;
    144144
    145 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     145#if DEBUG_REMOTE_SPINLOCKS
    146146hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    147147xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     
    196196        thread_ptr->remote_locks++;
    197197
    198 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     198#if DEBUG_REMOTE_SPINLOCKS
    199199hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ),
    200200                XPTR( local_cxy , thread_ptr) );
     
    222222        thread_t          * thread_ptr = CURRENT_THREAD;
    223223
    224 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     224#if DEBUG_REMOTE_SPINLOCKS
    225225hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    226226xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
  • trunk/kernel/libk/remote_spinlock.h

    r436 r438  
    4141    volatile uint32_t     taken;       /*! free if 0 / taken if non zero             */
    4242
    43 #if CONFIG_DEBUG_REMOTE_SPINLOCKS
     43#if DEBUG_REMOTE_SPINLOCKS
    4444    xptr_t                owner;       /*! extended pointer on the owner thread      */
    4545    xlist_entry_t         list;        /*! list of all remote_lock taken by owner    */
  • trunk/kernel/libk/rwlock.c

    r436 r438  
    3838    lock->count   = 0;
    3939
    40 #if CONFIG_DEBUG_RWLOCKS
     40#if DEBUG_RWLOCKS
    4141lock->owner   = NULL;
    4242list_entry_init( &lock->list );
     
    7070    this->local_locks++;
    7171
    72 #if CONFIG_DEBUG_RWLOCKS
     72#if DEBUG_RWLOCKS
    7373list_add_first( &this->locks_root , &lock->list );
    7474#endif
     
    9898    this->local_locks--;
    9999
    100 #if CONFIG_DEBUG_RWLOCKS
     100#if DEBUG_RWLOCKS
    101101list_unlink( &lock->list );
    102102#endif
     
    138138    this->local_locks++;
    139139
    140 #if CONFIG_DEBUG_RWLOCKS
     140#if DEBUG_RWLOCKS
    141141lock->owner = this;
    142142list_add_first( &this->locks_root , &lock->list );
     
    157157        hal_disable_irq( &mode );
    158158 
    159 #if CONFIG_DEBUG_RWLOCKS
     159#if DEBUG_RWLOCKS
    160160lock->owner = NULL;
    161161list_unlink( &lock->list );
  • trunk/kernel/libk/rwlock.h

    r436 r438  
    5959    uint32_t            count;            /*! number of simultaneous readers threads      */
    6060
    61 #if CONFIG_DEBUG_RWLOCKS
     61#if DEBUG_RWLOCKS
    6262        struct thread_s   * owner;            /*! pointer on curent writer thread             */
    6363    list_entry_t        list;             /*! member of list of locks taken by owner      */
  • trunk/kernel/libk/spinlock.c

    r436 r438  
    3838    lock->taken = 0;
    3939
    40 #if CONFIG_DEBUG_SPINLOCKS
     40#if DEBUG_SPINLOCKS
    4141lock->owner = NULL;
    4242list_entry_init( &lock->list );
     
    7171    this->local_locks++;
    7272
    73 #if CONFIG_DEBUG_SPINLOCKS
     73#if DEBUG_SPINLOCKS
    7474lock->owner = this;
    7575list_add_first( &this->locks_root , &lock->list );
     
    8686    thread_t * this = CURRENT_THREAD;;
    8787
    88 #if CONFIG_DEBUG_SPINLOCKS
     88#if DEBUG_SPINLOCKS
    8989lock->owner = NULL;
    9090list_unlink( &lock->list );
     
    132132    this->local_locks++;
    133133
    134 #if CONFIG_DEBUG_SPINLOCKS
     134#if DEBUG_SPINLOCKS
    135135lock->owner = this;
    136136list_add_first( &this->locks_root , &lock->list );
     
    162162        this->local_locks++;
    163163
    164 #if CONFIG_DEBUG_SPINLOCKS
     164#if DEBUG_SPINLOCKS
    165165lock->owner = this;
    166166list_add_first( &this->locks_root , &lock->list );
     
    177177    thread_t * this = CURRENT_THREAD;
    178178
    179 #if CONFIG_DEBUG_SPINLOCKS
     179#if DEBUG_SPINLOCKS
    180180lock->owner = NULL;
    181181list_unlink( &lock->list );
  • trunk/kernel/libk/spinlock.h

    r436 r438  
    6262        uint32_t            taken;             /*! state : free if zero / taken if non zero  */
    6363
    64 #if CONFIG_DEBUG_SPINLOCKS
     64#if DEBUG_SPINLOCKS
    6565        struct thread_s   * owner;             /*! pointer on curent owner thread            */
    6666    list_entry_t        list;              /*! member of list of locks taken by owner    */
  • trunk/kernel/mm/kcm.c

    r437 r438  
    4848{
    4949
    50 #if CONFIG_DEBUG_KCM
     50#if DEBUG_KCM
    5151uint32_t cycle = (uint32_t)hal_get_cycles();
    52 if( CONFIG_DEBUG_KCM < cycle )
     52if( DEBUG_KCM < cycle )
    5353printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
    5454__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
     
    8585                     + (index * kcm->block_size) );
    8686
    87 #if CONFIG_DEBUG_KCM
     87#if DEBUG_KCM
    8888cycle = (uint32_t)hal_get_cycles();
    89 if( CONFIG_DEBUG_KCM < cycle )
     89if( DEBUG_KCM < cycle )
    9090printk("\n[DBG] %s : thread %x exit / type  %s / ptr %p / page %x / count %d\n",
    9191__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
  • trunk/kernel/mm/kmem.c

    r435 r438  
    145145        assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );
    146146
    147 #if CONFIG_DEBUG_KMEM
     147#if DEBUG_KMEM
    148148uint32_t cycle = (uint32_t)hal_get_cycles();
    149 if( CONFIG_DEBUG_KMEM < cycle )
     149if( DEBUG_KMEM < cycle )
    150150printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
    151151__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     
    173173        hal_fence();
    174174
    175 #if CONFIG_DEBUG_KMEM
     175#if DEBUG_KMEM
    176176cycle = (uint32_t)hal_get_cycles();
    177 if( CONFIG_DEBUG_KMEM < cycle )
     177if( DEBUG_KMEM < cycle )
    178178printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    179179__FUNCTION__, CURRENT_THREAD, cycle );
     
    200200        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    201201
    202 #if CONFIG_DEBUG_KMEM
     202#if DEBUG_KMEM
    203203uint32_t cycle = (uint32_t)hal_get_cycles();
    204 if( CONFIG_DEBUG_KMEM < cycle )
     204if( DEBUG_KMEM < cycle )
    205205printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
    206206__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     
    222222                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
    223223
    224 #if CONFIG_DEBUG_KMEM
     224#if DEBUG_KMEM
    225225cycle = (uint32_t)hal_get_cycles();
    226 if( CONFIG_DEBUG_KMEM < cycle )
     226if( DEBUG_KMEM < cycle )
    227227printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
    228228__FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
     
    244244                if( flags & AF_ZERO ) memset( ptr , 0 , size );
    245245
    246 #if CONFIG_DEBUG_KMEM
     246#if DEBUG_KMEM
    247247cycle = (uint32_t)hal_get_cycles();
    248 if( CONFIG_DEBUG_KMEM < cycle )
     248if( DEBUG_KMEM < cycle )
    249249printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    250250__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
     
    275275                if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
    276276
    277 #if CONFIG_DEBUG_KMEM
     277#if DEBUG_KMEM
    278278cycle = (uint32_t)hal_get_cycles();
    279 if( CONFIG_DEBUG_KMEM < cycle )
     279if( DEBUG_KMEM < cycle )
    280280printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    281281__FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
  • trunk/kernel/mm/mapper.c

    r435 r438  
    143143    error_t       error;
    144144
    145 #if CONFIG_DEBUG_MAPPER_GET_PAGE
     145#if DEBUG_MAPPER_GET_PAGE
    146146uint32_t cycle = (uint32_t)hal_get_cycles();
    147 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     147if( DEBUG_MAPPER_GET_PAGE < cycle )
    148148printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
    149149__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
     
    175175        {
    176176
    177 #if (CONFIG_DEBUG_MAPPER_GET_PAGE & 1)
    178 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     177#if (DEBUG_MAPPER_GET_PAGE & 1)
     178if( DEBUG_MAPPER_GET_PAGE < cycle )
    179179printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
    180180#endif
     
    257257    }
    258258
    259 #if CONFIG_DEBUG_MAPPER_GET_PAGE
     259#if DEBUG_MAPPER_GET_PAGE
    260260cycle = (uint32_t)hal_get_cycles();
    261 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     261if( DEBUG_MAPPER_GET_PAGE < cycle )
    262262printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
    263263__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
     
    317317    uint8_t  * buf_ptr;        // current buffer  address
    318318
    319 #if CONFIG_DEBUG_MAPPER_MOVE_USER
     319#if DEBUG_MAPPER_MOVE_USER
    320320uint32_t cycle = (uint32_t)hal_get_cycles();
    321 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     321if( DEBUG_MAPPER_MOVE_USER < cycle )
    322322printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
    323323__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
     
    347347        else                       page_count = CONFIG_PPM_PAGE_SIZE;
    348348
    349 #if (CONFIG_DEBUG_MAPPER_MOVE_USER & 1)
    350 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     349#if (DEBUG_MAPPER_MOVE_USER & 1)
     350if( DEBUG_MAPPER_MOVE_USER < cycle )
    351351printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
    352352__FUNCTION__ , index , page_offset , page_count );
     
    379379    }
    380380
    381 #if CONFIG_DEBUG_MAPPER_MOVE_USER
     381#if DEBUG_MAPPER_MOVE_USER
    382382cycle = (uint32_t)hal_get_cycles();
    383 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     383if( DEBUG_MAPPER_MOVE_USER < cycle )
    384384printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
    385385__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
     
    412412    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
    413413
    414 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
     414#if DEBUG_MAPPER_MOVE_KERNEL
    415415uint32_t cycle = (uint32_t)hal_get_cycles();
    416 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     416if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    417417printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    418418__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
     
    427427    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
    428428
    429 #if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
    430 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     429#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
     430if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    431431printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
    432432#endif
     
    459459        else                       page_count = CONFIG_PPM_PAGE_SIZE;
    460460
    461 #if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
    462 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     461#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
     462if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    463463printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
    464464__FUNCTION__ , index , page_offset , page_count );
     
    494494    }
    495495
    496 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
     496#if DEBUG_MAPPER_MOVE_KERNEL
    497497cycle = (uint32_t)hal_get_cycles();
    498 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     498if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    499499printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    500500__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
  • trunk/kernel/mm/ppm.c

    r437 r438  
    201201        uint32_t   current_size;
    202202 
    203 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     203#if DEBUG_PPM_ALLOC_PAGES
    204204uint32_t cycle = (uint32_t)hal_get_cycles();
    205 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     205if( DEBUG_PPM_ALLOC_PAGES < cycle )
    206206printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    207207__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
    208208#endif
    209209
    210 #if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
    211 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     210#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     211if( DEBUG_PPM_ALLOC_PAGES < cycle )
    212212ppm_print();
    213213#endif
     
    239239                spinlock_unlock( &ppm->free_lock );
    240240
    241 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     241#if DEBUG_PPM_ALLOC_PAGES
    242242cycle = (uint32_t)hal_get_cycles();
    243 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     243if( DEBUG_PPM_ALLOC_PAGES < cycle )
    244244printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
    245245__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     
    275275        spinlock_unlock( &ppm->free_lock );
    276276
    277 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     277#if DEBUG_PPM_ALLOC_PAGES
    278278cycle = (uint32_t)hal_get_cycles();
    279 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     279if( DEBUG_PPM_ALLOC_PAGES < cycle )
    280280printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
    281281__FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     
    292292        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    293293
    294 #if CONFIG_DEBUG_PPM_FREE_PAGES
     294#if DEBUG_PPM_FREE_PAGES
    295295uint32_t cycle = (uint32_t)hal_get_cycles();
    296 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     296if( DEBUG_PPM_FREE_PAGES < cycle )
    297297printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    298298__FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
    299299#endif
    300300
    301 #if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
    302 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     301#if(DEBUG_PPM_FREE_PAGES & 0x1)
     302if( DEBUG_PPM_FREE_PAGES < cycle )
    303303ppm_print();
    304304#endif
     
    312312        spinlock_unlock( &ppm->free_lock );
    313313
    314 #if CONFIG_DEBUG_PPM_FREE_PAGES
     314#if DEBUG_PPM_FREE_PAGES
    315315cycle = (uint32_t)hal_get_cycles();
    316 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     316if( DEBUG_PPM_FREE_PAGES < cycle )
    317317printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
    318318__FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
  • trunk/kernel/mm/vmm.c

    r437 r438  
    6363    intptr_t  size;
    6464
    65 #if CONFIG_DEBUG_VMM_INIT
     65#if DEBUG_VMM_INIT
    6666uint32_t cycle = (uint32_t)hal_get_cycles();
    67 if( CONFIG_DEBUG_VMM_INIT )
     67if( DEBUG_VMM_INIT )
    6868printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    6969__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    183183    hal_fence();
    184184
    185 #if CONFIG_DEBUG_VMM_INIT
     185#if DEBUG_VMM_INIT
    186186cycle = (uint32_t)hal_get_cycles();
    187 if( CONFIG_DEBUG_VMM_INIT )
     187if( DEBUG_VMM_INIT )
    188188printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
    189189__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     
    266266    lpid_t          owner_lpid;
    267267
    268 #if CONFIG_DEBUG_VMM_UPDATE_PTE
     268#if DEBUG_VMM_UPDATE_PTE
    269269uint32_t cycle = (uint32_t)hal_get_cycles();
    270 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     270if( DEBUG_VMM_UPDATE_PTE < cycle )
    271271printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
    272272__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     
    292292        remote_process_cxy = GET_CXY( remote_process_xp );
    293293
    294 #if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
    295 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     294#if (DEBUG_VMM_UPDATE_PTE & 0x1)
     295if( DEBUG_VMM_UPDATE_PTE < cycle )
    296296printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
    297297__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     
    305305    } 
    306306
    307 #if CONFIG_DEBUG_VMM_UPDATE_PTE
     307#if DEBUG_VMM_UPDATE_PTE
    308308cycle = (uint32_t)hal_get_cycles();
    309 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     309if( DEBUG_VMM_UPDATE_PTE < cycle )
    310310printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
    311311__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     
    338338    lpid_t          owner_lpid;
    339339
    340 #if CONFIG_DEBUG_VMM_SET_COW
     340#if DEBUG_VMM_SET_COW
    341341uint32_t cycle = (uint32_t)hal_get_cycles();
    342 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     342if( DEBUG_VMM_SET_COW < cycle )
    343343printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    344344__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    370370        remote_process_cxy = GET_CXY( remote_process_xp );
    371371
    372 #if (CONFIG_DEBUG_VMM_SET_COW &0x1)
    373 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     372#if (DEBUG_VMM_SET_COW &0x1)
     373if( DEBUG_VMM_SET_COW < cycle )
    374374printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
    375375__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     
    394394            vpn_t    vpn_size = vseg->vpn_size;
    395395
    396 #if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
    397 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     396#if (DEBUG_VMM_SET_COW & 0x1)
     397if( DEBUG_VMM_SET_COW < cycle )
    398398printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
    399399__FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
     
    445445    }   // end loop on process copies
    446446 
    447 #if CONFIG_DEBUG_VMM_SET_COW
     447#if DEBUG_VMM_SET_COW
    448448cycle = (uint32_t)hal_get_cycles();
    449 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     449if( DEBUG_VMM_SET_COW < cycle )
    450450printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
    451451__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    480480    ppn_t       ppn;
    481481
    482 #if CONFIG_DEBUG_VMM_FORK_COPY
     482#if DEBUG_VMM_FORK_COPY
    483483uint32_t cycle = (uint32_t)hal_get_cycles();
    484 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     484if( DEBUG_VMM_FORK_COPY < cycle )
    485485printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    486486__FUNCTION__ , CURRENT_THREAD, cycle );
     
    530530        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
    531531       
    532 #if CONFIG_DEBUG_VMM_FORK_COPY
     532#if DEBUG_VMM_FORK_COPY
    533533cycle = (uint32_t)hal_get_cycles();
    534 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     534if( DEBUG_VMM_FORK_COPY < cycle )
    535535printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
    536536__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
     
    556556            vseg_attach( child_vmm , child_vseg );
    557557
    558 #if CONFIG_DEBUG_VMM_FORK_COPY
     558#if DEBUG_VMM_FORK_COPY
    559559cycle = (uint32_t)hal_get_cycles();
    560 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     560if( DEBUG_VMM_FORK_COPY < cycle )
    561561printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
    562562__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
     
    597597                        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    598598
    599 #if CONFIG_DEBUG_VMM_FORK_COPY
     599#if DEBUG_VMM_FORK_COPY
    600600cycle = (uint32_t)hal_get_cycles();
    601 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     601if( DEBUG_VMM_FORK_COPY < cycle )
    602602printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
    603603__FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     
    649649    hal_fence();
    650650
    651 #if CONFIG_DEBUG_VMM_FORK_COPY
     651#if DEBUG_VMM_FORK_COPY
    652652cycle = (uint32_t)hal_get_cycles();
    653 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     653if( DEBUG_VMM_FORK_COPY < cycle )
    654654printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
    655655__FUNCTION__ , CURRENT_THREAD , cycle );
     
    666666        vseg_t * vseg;
    667667
    668 #if CONFIG_DEBUG_VMM_DESTROY
     668#if DEBUG_VMM_DESTROY
    669669uint32_t cycle = (uint32_t)hal_get_cycles();
    670 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     670if( DEBUG_VMM_DESTROY < cycle )
    671671printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    672672__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
    673673#endif
    674674
    675 #if (CONFIG_DEBUG_VMM_DESTROY & 1 )
     675#if (DEBUG_VMM_DESTROY & 1 )
    676676vmm_display( process , true );
    677677#endif
     
    694694        vseg    = GET_PTR( vseg_xp );
    695695
    696 #if( CONFIG_DEBUG_VMM_DESTROY & 1 )
    697 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     696#if( DEBUG_VMM_DESTROY & 1 )
     697if( DEBUG_VMM_DESTROY < cycle )
    698698printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n",
    699699__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     
    728728    hal_gpt_destroy( &vmm->gpt );
    729729
    730 #if CONFIG_DEBUG_VMM_DESTROY
     730#if DEBUG_VMM_DESTROY
    731731cycle = (uint32_t)hal_get_cycles();
    732 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     732if( DEBUG_VMM_DESTROY < cycle )
    733733printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    734734__FUNCTION__ , CURRENT_THREAD , cycle );
     
    882882        error_t      error;
    883883
    884 #if CONFIG_DEBUG_VMM_CREATE_VSEG
     884#if DEBUG_VMM_CREATE_VSEG
    885885uint32_t cycle = (uint32_t)hal_get_cycles();
    886 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     886if( DEBUG_VMM_CREATE_VSEG < cycle )
    887887printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
    888888__FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
     
    973973        remote_rwlock_wr_unlock( lock_xp );
    974974
    975 #if CONFIG_DEBUG_VMM_CREATE_VSEG
     975#if DEBUG_VMM_CREATE_VSEG
    976976cycle = (uint32_t)hal_get_cycles();
    977 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     977if( DEBUG_VMM_CREATE_VSEG < cycle )
    978978printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
    979979__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     
    11101110    uint32_t    count;      // actual number of pendinf forks
    11111111
    1112 #if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1112#if DEBUG_VMM_UNMAP_VSEG
    11131113uint32_t cycle = (uint32_t)hal_get_cycles();
    1114 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1114if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11151115printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
    11161116__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     
    11311131        {
    11321132
    1133 #if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )
    1134 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1133#if( DEBUG_VMM_UNMAP_VSEG & 1 )
     1134if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11351135printk("- vpn %x / ppn %x\n" , vpn , ppn );
    11361136#endif
     
    11831183    }
    11841184
    1185 #if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1185#if DEBUG_VMM_UNMAP_VSEG
    11861186cycle = (uint32_t)hal_get_cycles();
    1187 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1187if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11881188printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
    11891189__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     
    13831383{
    13841384
    1385 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
    1386 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1385#if DEBUG_VMM_ALLOCATE_PAGE
     1386if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    13871387printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
    13881388__FUNCTION__ , CURRENT_THREAD, vpn );
     
    14271427    }
    14281428
    1429 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
    1430 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1429#if DEBUG_VMM_ALLOCATE_PAGE
     1430if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    14311431printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
    14321432__FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     
    14521452    index     = vpn - vseg->vpn_base;
    14531453
    1454 #if CONFIG_DEBUG_VMM_GET_ONE_PPN
    1455 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1454#if DEBUG_VMM_GET_ONE_PPN
     1455if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    14561456printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
    14571457__FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
     
    15151515            uint32_t elf_offset = vseg->file_offset + offset;
    15161516
    1517 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1518 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1517#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1518if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15191519printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
    15201520__FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
     
    15301530            {
    15311531
    1532 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1533 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1532#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1533if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15341534printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
    15351535__FUNCTION__, CURRENT_THREAD, vpn );
     
    15481548            {
    15491549
    1550 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1551 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1550#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1551if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15521552printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
    15531553__FUNCTION__, CURRENT_THREAD, vpn );
     
    15801580            {
    15811581
    1582 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1583 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1582#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1583if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15841584printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
    15851585"      %d bytes from mapper / %d bytes from BSS\n",
     
    16271627    *ppn = ppm_page2ppn( page_xp );
    16281628
    1629 #if CONFIG_DEBUG_VMM_GET_ONE_PPN
    1630 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1629#if DEBUG_VMM_GET_ONE_PPN
     1630if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    16311631printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
    16321632__FUNCTION__ , CURRENT_THREAD , vpn , *ppn );