Changeset 438 for trunk/kernel/mm/vmm.c


Timestamp: Apr 4, 2018, 2:49:02 PM (6 years ago)
Author:    alain
Message:   Fix a bug in scheduler related to RPC blocking.

File: 1 edited
  • trunk/kernel/mm/vmm.c

Legend: unchanged lines are unmarked; lines removed in r438 are prefixed with "-", added lines with "+".

trunk/kernel/mm/vmm.c: r437 → r438
@@ line 63 @@
     intptr_t  size;

-#if CONFIG_DEBUG_VMM_INIT
+#if DEBUG_VMM_INIT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_INIT )
+if( DEBUG_VMM_INIT )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );

@@ line 183 @@
     hal_fence();

-#if CONFIG_DEBUG_VMM_INIT
+#if DEBUG_VMM_INIT
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_INIT )
+if( DEBUG_VMM_INIT )
 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
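Apart from the vmm_get_pte() restructuring further down, every edit in this file follows the same pattern: the per-function debug switches lose their CONFIG_ prefix (CONFIG_DEBUG_VMM_INIT becomes DEBUG_VMM_INIT, and so on), while the instrumentation idiom itself is unchanged. As a minimal sketch, where DEBUG_VMM_XXX stands for any of these switches and is assumed to be defined in the kernel configuration (0 compiles the trace out, a non-zero value enables it):

// sketch of the per-function debug idiom after r438; DEBUG_VMM_XXX is a
// placeholder for one of the switches above, assumed to be a compile-time
// constant from the kernel configuration
#if DEBUG_VMM_XXX
uint32_t cycle = (uint32_t)hal_get_cycles();      // read the current cycle counter
if( DEBUG_VMM_XXX < cycle )                       // trace only past the configured cycle
printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
#endif

vmm_init() above tests only the flag (if( DEBUG_VMM_INIT )); the other functions in this changeset compare the switch against the current cycle, so the configured value also acts as a trace start date.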
     
@@ line 266 @@
     lpid_t          owner_lpid;

-#if CONFIG_DEBUG_VMM_UPDATE_PTE
+#if DEBUG_VMM_UPDATE_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );

@@ line 292 @@
         remote_process_cxy = GET_CXY( remote_process_xp );

-#if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+#if (DEBUG_VMM_UPDATE_PTE & 0x1)
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );

@@ line 305 @@
     }

-#if CONFIG_DEBUG_VMM_UPDATE_PTE
+#if DEBUG_VMM_UPDATE_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
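A second convention shows up in these guards: bit 0 of the same switch enables the extra per-cluster and per-vseg messages, so a single value both dates and details the trace. A hypothetical configuration value illustrating this (the real values live in the kernel configuration and are not part of this changeset):

// hypothetical value, for illustration only: trace vmm_update_pte() after
// cycle 0x100000, with the verbose messages enabled (bit 0 set)
#define DEBUG_VMM_UPDATE_PTE  0x100001

// with such a value, the detailed guard shown above compiles in and fires
// once the cycle counter passes the threshold
#if (DEBUG_VMM_UPDATE_PTE & 0x1)
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
#endif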
     
@@ line 338 @@
     lpid_t          owner_lpid;

-#if CONFIG_DEBUG_VMM_SET_COW
+#if DEBUG_VMM_SET_COW
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );

@@ line 370 @@
         remote_process_cxy = GET_CXY( remote_process_xp );

-#if (CONFIG_DEBUG_VMM_SET_COW &0x1)
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+#if (DEBUG_VMM_SET_COW &0x1)
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );

@@ line 394 @@
             vpn_t    vpn_size = vseg->vpn_size;

-#if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+#if (DEBUG_VMM_SET_COW & 0x1)
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
 __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );

@@ line 445 @@
     }   // end loop on process copies

-#if CONFIG_DEBUG_VMM_SET_COW
+#if DEBUG_VMM_SET_COW
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
@@ line 480 @@
     ppn_t       ppn;

-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x enter / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD, cycle );

@@ line 530 @@
         type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );

-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),

@@ line 556 @@
             vseg_attach( child_vmm , child_vseg );

-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),

@@ line 597 @@
                         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );

-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , cycle );

@@ line 649 @@
     hal_fence();

-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , cycle );
     
@@ line 666 @@
         vseg_t * vseg;

-#if CONFIG_DEBUG_VMM_DESTROY
+#if DEBUG_VMM_DESTROY
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
 #endif

-#if (CONFIG_DEBUG_VMM_DESTROY & 1 )
+#if (DEBUG_VMM_DESTROY & 1 )
 vmm_display( process , true );
 #endif

@@ line 694 @@
         vseg    = GET_PTR( vseg_xp );

-#if( CONFIG_DEBUG_VMM_DESTROY & 1 )
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+#if( DEBUG_VMM_DESTROY & 1 )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n",
 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );

@@ line 728 @@
     hal_gpt_destroy( &vmm->gpt );

-#if CONFIG_DEBUG_VMM_DESTROY
+#if DEBUG_VMM_DESTROY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , cycle );
     
@@ line 882 @@
         error_t      error;

-#if CONFIG_DEBUG_VMM_CREATE_VSEG
+#if DEBUG_VMM_CREATE_VSEG
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+if( DEBUG_VMM_CREATE_VSEG < cycle )
 printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );

@@ line 973 @@
         remote_rwlock_wr_unlock( lock_xp );

-#if CONFIG_DEBUG_VMM_CREATE_VSEG
+#if DEBUG_VMM_CREATE_VSEG
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+if( DEBUG_VMM_CREATE_VSEG < cycle )
 printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     
@@ line 1110 @@
     uint32_t    count;      // actual number of pendinf forks

-#if CONFIG_DEBUG_VMM_UNMAP_VSEG
+#if DEBUG_VMM_UNMAP_VSEG
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );

@@ line 1131 @@
         {

-#if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+#if( DEBUG_VMM_UNMAP_VSEG & 1 )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("- vpn %x / ppn %x\n" , vpn , ppn );
 #endif

@@ line 1183 @@
     }

-#if CONFIG_DEBUG_VMM_UNMAP_VSEG
+#if DEBUG_VMM_UNMAP_VSEG
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     
@@ line 1383 @@
 {

-#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
-if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_ALLOCATE_PAGE
+if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
 __FUNCTION__ , CURRENT_THREAD, vpn );

@@ line 1427 @@
     }

-#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
-if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_ALLOCATE_PAGE
+if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
 __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     
@@ line 1452 @@
     index     = vpn - vseg->vpn_base;

-#if CONFIG_DEBUG_VMM_GET_ONE_PPN
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_GET_ONE_PPN
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
 __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );

@@ line 1515 @@
             uint32_t elf_offset = vseg->file_offset + offset;

-#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, elf_offset );

@@ line 1530 @@
             {

-#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
 __FUNCTION__, CURRENT_THREAD, vpn );

@@ line 1548 @@
             {

-#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
 __FUNCTION__, CURRENT_THREAD, vpn );

@@ line 1580 @@
             {

-#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
 "      %d bytes from mapper / %d bytes from BSS\n",

@@ line 1627 @@
     *ppn = ppm_page2ppn( page_xp );

-#if CONFIG_DEBUG_VMM_GET_ONE_PPN
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_GET_ONE_PPN
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
     
@@ line 1655 @@
     "not called in the reference cluster\n" );

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
-printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
 #endif
     
@@ line 1675 @@
     }

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
 #endif

-    // access GPT to get current PTE attributes and PPN
-    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
-
-    // for both "copy_on_write" and "page_fault" events, allocate a physical page,
-    // initialize it, register it in the reference GPT, update GPT copies in all
-    // clusters containing a copy, and return the new_ppn and new_attr
-
-    if( cow )  /////////////////////////// copy_on_write request //////////////////////
-    {
+    if( cow )  //////////////// copy_on_write request //////////////////////
+               // get PTE from reference GPT
+               // allocate a new physical page if there is pending forks,
+               // initialize it from old physical page content,
+               // update PTE in all GPT copies,
+    {
+        // access GPT to get current PTE attributes and PPN
+        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
+
         assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
         "PTE must be mapped for a copy-on-write exception\n" );

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid );

@@ line 1744 @@
         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
     }
-    else  ////////////////////////////////// page_fault request ////////////////////////
+    else        //////////// page_fault request ///////////////////////////
+                // get PTE from reference GPT
+                // allocate a physical page if it is a true page fault,
+                // register in reference GPT, but don't update GPT copies
     {
+        // access GPT to get current PTE
+        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
+
         if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
         {

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid );

@@ line 1792 (r437) / 1798 (r438) @@
     }

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
-printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
 #endif

-    // return success
+    // return PPN and flags
     *ppn  = new_ppn;
     *attr = new_attr;
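The only change in this file that is not a plain rename is this restructuring of vmm_get_pte(): instead of reading the PTE once before the branch, hal_gpt_get_pte() is now called separately inside the copy-on-write branch and the page-fault branch, and each branch carries its own comment block. A condensed sketch of the resulting control flow, using only the calls visible in the hunks above (allocation, GPT updates and error handling are elided and marked "..."):

// condensed sketch of vmm_get_pte() after r438; the "..." comments stand for
// code that is not shown in this changeset
if( cow )          // copy_on_write request
{
    // read the current PTE from the reference GPT
    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );

    // a COW exception is only legal on a mapped PTE
    assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    "PTE must be mapped for a copy-on-write exception\n" );

    // ... allocate a new physical page if there are pending forks, copy the
    // old page content, and update the PTE in all GPT copies ...

    // decrement the pending forks counter on the page
    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
}
else               // page_fault request
{
    // read the current PTE from the reference GPT
    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );

    if( (old_attr & GPT_MAPPED) == 0 )   // true page fault => map it
    {
        // ... allocate a physical page and register it in the reference GPT;
        // the GPT copies are not updated on this path ...
    }
}

// return PPN and flags to the caller
*ppn  = new_ppn;
*attr = new_attr;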
     
@@ line 1814 (r437) / 1820 (r438) @@
     error_t          error;

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ line 1854 (r437) / 1860 (r438) @@
     }

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ line 1873 (r437) / 1879 (r438) @@
     error_t          error;

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ line 1913 (r437) / 1919 (r438) @@
     }

-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );