Changeset 435 for trunk/kernel/mm


Timestamp: Feb 20, 2018, 5:32:17 PM
Author: alain
Message: Fix a bad bug in scheduler...
Location: trunk/kernel/mm
Files: 4 edited

Legend: unchanged context lines are unprefixed; added lines are prefixed with "+"; removed lines are prefixed with "-".
  • trunk/kernel/mm/kcm.c

    r433 → r435

@@ -48,7 +48,7 @@
 {

-#if CONFIG_DEBUG_KCM_ALLOC
+#if CONFIG_DEBUG_KCM
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+if( CONFIG_DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,

@@ -85,7 +85,7 @@
                      + (index * kcm->block_size) );

-#if CONFIG_DEBUG_KCM_ALLOC
+#if CONFIG_DEBUG_KCM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+if( CONFIG_DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x exit / type  %s / ptr %p / page %x / count %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
  • trunk/kernel/mm/kmem.c

    r433 → r435

@@ -145,6 +145,10 @@
         assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );

-        kmem_dmsg("\n[DBG] %s : enters / KCM type %s missing in cluster %x\n",
-                  __FUNCTION__ , kmem_type_str( type ) , local_cxy );
+#if CONFIG_DEBUG_KMEM
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
+#endif

         cluster_t * cluster = LOCAL_CLUSTER;

@@ -169,6 +173,10 @@
         hal_fence();

-        kmem_dmsg("\n[DBG] %s : exit / KCM type %s created in cluster %x\n",
-                  __FUNCTION__ , kmem_type_str( type ) , local_cxy );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, cycle );
+#endif

         return 0;

@@ -192,17 +200,14 @@
         assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );

-        kmem_dmsg("\n[DBG] %s : enters in cluster %x for type %s\n",
-                      __FUNCTION__ , local_cxy , kmem_type_str( type ) );
+#if CONFIG_DEBUG_KMEM
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
+#endif

         // analyse request type
         if( type == KMEM_PAGE )                        // PPM allocator
         {
-
-#if CONFIG_DEBUG_KMEM_ALLOC
-if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
-printk("\n[DBG] in %s : thread %x enter for %d page(s)\n",
-__FUNCTION__ , CURRENT_THREAD , 1<<size );
-#endif
-
                 // allocate the number of requested pages
                 ptr = (void *)ppm_alloc_pages( size );

@@ -217,12 +222,9 @@
                 if( flags & AF_ZERO ) page_zero( (page_t *)ptr );

-                kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
-                          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-                          (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
-
-#if CONFIG_DEBUG_KMEM_ALLOC
-if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
-printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n",
-__FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
 #endif

@@ -242,7 +244,11 @@
                 if( flags & AF_ZERO ) memset( ptr , 0 , size );

-                kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
-                          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-                          (intptr_t)ptr , req->size );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
+#endif
+
         }
         else                                           // KCM allocator

@@ -269,7 +275,12 @@
                 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );

-                kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
-                          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-                          (intptr_t)ptr , kmem_type_size( type ) );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
+kmem_type_size(type), cycle );
+#endif
+
         }

  • trunk/kernel/mm/mapper.c

    r408 → r435

@@ -143,6 +143,10 @@
     error_t       error;

-mapper_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper %x\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , mapper );
+#if CONFIG_DEBUG_MAPPER_GET_PAGE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
+#endif

     thread_t * this = CURRENT_THREAD;

@@ -171,7 +175,8 @@
         {

-mapper_dmsg("\n[DBG] %s : core[%x,%d] missing page => load from device\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
-
+#if (CONFIG_DEBUG_MAPPER_GET_PAGE & 1)
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
+#endif
             // allocate one page from PPM
             req.type  = KMEM_PAGE;

@@ -230,8 +235,4 @@
             // reset the page INLOAD flag to make the page available to all readers
             page_clear_flag( page , PG_INLOAD );
-
-mapper_dmsg("\n[DBG] %s : missing page loaded / ppn = %x\n",
-__FUNCTION__ , ppm_page2ppn(XPTR(local_cxy,page)) );
-
         }
         else if( page_is_flag( page , PG_INLOAD ) )   // page is loaded by another thread

@@ -256,6 +257,10 @@
     }

-mapper_dmsg("\n[DBG] %s : exit for page %d / mapper %x / page_desc = %x\n",
-__FUNCTION__ , index , mapper , page );
+#if CONFIG_DEBUG_MAPPER_GET_PAGE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
+#endif

     return page;

@@ -312,6 +317,10 @@
     uint8_t  * buf_ptr;        // current buffer  address

-    mapper_dmsg("\n[DBG] %s : enters / to_buf = %d / buffer = %x\n",
-                __FUNCTION__ , to_buffer , buffer );
+#if CONFIG_DEBUG_MAPPER_MOVE_USER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
+#endif

     // compute offsets of first and last bytes in file

@@ -338,6 +347,9 @@
         else                       page_count = CONFIG_PPM_PAGE_SIZE;

-        mapper_dmsg("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
-                    __FUNCTION__ , index , page_offset , page_count );
+#if (CONFIG_DEBUG_MAPPER_MOVE_USER & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
+__FUNCTION__ , index , page_offset , page_count );
+#endif

         // get page descriptor

@@ -353,7 +365,4 @@
         buf_ptr = (uint8_t *)buffer + done;

-        mapper_dmsg("\n[DBG] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
-                    __FUNCTION__ , index , buf_ptr , map_ptr );
-
         // move fragment
         if( to_buffer )

@@ -370,6 +379,10 @@
     }

-    mapper_dmsg("\n[DBG] %s : exit for buffer %x\n",
-                __FUNCTION__, buffer );
+#if CONFIG_DEBUG_MAPPER_MOVE_USER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
+#endif

     return 0;

@@ -399,6 +412,10 @@
     uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

-mapper_dmsg("\n[DBG] %s : core[%x,%d] / to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
-__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, to_buffer, buffer_cxy, buffer_ptr, size );
+#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+#endif

     // compute offsets of first and last bytes in file

@@ -410,6 +427,8 @@
     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

-mapper_dmsg("\n[DBG] %s : core[%x,%d] / first_page = %d / last_page = %d\n",
-__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, first, last );
+#if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
+#endif

     // compute source and destination clusters

@@ -440,6 +459,9 @@
         else                       page_count = CONFIG_PPM_PAGE_SIZE;

-mapper_dmsg("\n[DBG] %s : core[%x;%d] / page_index = %d / offset = %d / bytes = %d\n",
-__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, index, page_offset, page_count );
+#if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
+__FUNCTION__ , index , page_offset , page_count );
+#endif

         // get page descriptor

@@ -472,6 +494,10 @@
     }

-mapper_dmsg("\n[DBG] %s : core_cxy[%x,%d] / exit / buf_cxy = %x / buf_ptr = %x / size = %x\n",
-__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, buffer_cxy, buffer_ptr, size );
+#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+#endif

     return 0;
  • trunk/kernel/mm/vmm.c

    r433 → r435

@@ -1643,5 +1643,5 @@
 #if CONFIG_DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE > cycle )
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );

@@ -1800,4 +1800,11 @@
     error_t          error;

+#if CONFIG_DEBUG_VMM_GET_PTE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
+
     // get reference process cluster and local pointer
     cxy_t       ref_cxy = GET_CXY( process->ref_xp );

@@ -1833,4 +1840,11 @@
     }

+#if CONFIG_DEBUG_VMM_GET_PTE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
+
     return error;

@@ -1845,4 +1859,10 @@
     error_t          error;

+#if CONFIG_DEBUG_VMM_GET_PTE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif

     // get reference process cluster and local pointer

@@ -1879,4 +1899,11 @@
     }

+#if CONFIG_DEBUG_VMM_GET_PTE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
+__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
+
     return error;

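Across all four files this changeset replaces the older kmem_dmsg()/mapper_dmsg() trace macros and the per-allocator CONFIG_DEBUG_KCM_ALLOC / CONFIG_DEBUG_KMEM_ALLOC guards with one uniform pattern: each instrumented function samples the cycle counter and prints only when a per-feature threshold (CONFIG_DEBUG_KCM, CONFIG_DEBUG_KMEM, CONFIG_DEBUG_MAPPER_*, CONFIG_DEBUG_VMM_GET_PTE) is non-zero and smaller than the current cycle, so early boot-time traffic stays silent. The vmm.c hunk at line 1645 also fixes a guard that compared in the wrong direction (">" instead of "<"), which would have enabled the trace only before the threshold instead of after it. Below is a minimal, self-contained sketch of the idiom, not taken from the repository: hal_get_cycles() and printk() are replaced by hypothetical stand-ins, and CONFIG_DEBUG_DEMO / demo_alloc() are invented names used only for illustration. It also shows the mapper.c variant, where extra per-iteration traces are guarded by (threshold & 1), i.e. they compile in only when the configured value is odd.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins so the sketch builds outside the kernel; in the
 * changeset the real primitives are hal_get_cycles() and printk().        */
static uint64_t fake_cycles = 0;
static uint64_t hal_get_cycles( void ) { return fake_cycles += 1000; }
#define printk printf

/* Hypothetical per-feature threshold, normally defined in the kernel
 * configuration. 0 disables the trace at compile time; a non-zero value N
 * enables it only once the cycle counter has passed N. An odd value
 * additionally enables the extra "verbose" trace guarded by (N & 1).      */
#define CONFIG_DEBUG_DEMO  5001

static void demo_alloc( uint32_t size )
{

#if CONFIG_DEBUG_DEMO
uint32_t cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_DEMO < cycle )
printk("\n[DBG] %s : enter / size %u / cycle %u\n", __FUNCTION__, size, cycle );
#endif

    /* ... the real allocation work would go here ... */

#if (CONFIG_DEBUG_DEMO & 1)
if( CONFIG_DEBUG_DEMO < cycle )
printk("\n[DBG] %s : intermediate step / cycle %u\n", __FUNCTION__, cycle );
#endif

#if CONFIG_DEBUG_DEMO
cycle = (uint32_t)hal_get_cycles();
if( CONFIG_DEBUG_DEMO < cycle )
printk("\n[DBG] %s : exit / cycle %u\n", __FUNCTION__, cycle );
#endif

}

int main( void )
{
    /* early calls stay below the 5001-cycle threshold and print nothing;
     * once the counter passes it, the enter/verbose/exit traces appear    */
    for( uint32_t i = 0 ; i < 4 ; i++ ) demo_alloc( 1u << i );
    return 0;
}

As the added lines in the hunks show, the debug blocks are kept at column zero rather than at the surrounding indentation, which makes the instrumentation easy to spot and to strip; the sketch follows the same convention.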