Changeset 635 for trunk/kernel/mm/kcm.c


Ignore:
Timestamp:
Jun 26, 2019, 11:42:37 AM (15 months ago)
Author:
alain
Message:

This version is a major evolution: The physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This has been done to cure a dead-lock in case of concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.c

    r619 r635  
    11/*
    2  * kcm.c - Per cluster Kernel Cache Manager implementation.
     2 * kcm.c - Kernel Cache Manager implementation.
    33 *
    4  * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner    (2016,2017,2018,2019)
     4 * Author  Alain Greiner    (2016,2017,2018,2019)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    3837
    3938
     39/////////////////////////////////////////////////////////////////////////////////////
     40//        Local access functions
     41/////////////////////////////////////////////////////////////////////////////////////
     42
    4043//////////////////////////////////////////////////////////////////////////////////////
    41 // This static function returns pointer on an allocated block from an active page.
    42 // It returns NULL if no block available in selected page.
    43 // It changes the page status if required.
     44// This static function must be called by a local thread.
     45// It returns a pointer on a block allocated from a non-full kcm_page.
     46// It makes a panic if no block is available in selected page.
     47// It changes the page status as required.
    4448//////////////////////////////////////////////////////////////////////////////////////
    45 // @ kcm      : pointer on kcm allocator.
    46 // @ kcm_page : pointer on active kcm page to use.
    47 /////////////////////////////////////////////////////////////////////////////////////
    48 static void * kcm_get_block( kcm_t      * kcm,
    49                              kcm_page_t * kcm_page )
    50 {
    51 
    52 #if DEBUG_KCM
    53 thread_t * this = CURRENT_THREAD;
    54 uint32_t cycle = (uint32_t)hal_get_cycles();
     49// @ kcm      : pointer on KCM allocator.
     50// @ kcm_page : pointer on a non-full kcm_page.
     51// @ return pointer on allocated block.
     52/////////////////////////////////////////////////////////////////////////////////////
     53static void * __attribute__((noinline)) kcm_get_block( kcm_t      * kcm,
     54                                                       kcm_page_t * kcm_page )
     55{
     56    // initialise variables
     57    uint32_t size   = 1 << kcm->order;
     58    uint32_t max    = kcm->max_blocks;
     59    uint32_t count  = kcm_page->count;
     60    uint64_t status = kcm_page->status;
     61
     62assert( (count < max) , "kcm_page should not be full" );
     63
     64    uint32_t index  = 1;
     65    uint64_t mask   = (uint64_t)0x2;
     66    uint32_t found  = 0;
     67
     68        // allocate first free block in kcm_page, update status,
     69    // and count , compute index of allocated block in kcm_page
     70    while( index <= max )
     71    {
     72        if( (status & mask) == 0 )   // block non allocated
     73        {
     74            kcm_page->status = status | mask;
     75            kcm_page->count  = count + 1;
     76            found  = 1;
     77
     78            break;     
     79        }
     80       
     81        index++;
     82        mask <<= 1;
     83    }
     84
     85    // change the page list if almost full
     86    if( count == max-1 )
     87    {
     88                list_unlink( &kcm_page->list);
     89                kcm->active_pages_nr--;
     90
     91        list_add_first( &kcm->full_root , &kcm_page->list );
     92                kcm->full_pages_nr ++;
     93    }
     94
     95        // compute return pointer
     96        void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
     97
     98#if (DEBUG_KCM & 1)
     99thread_t * this  = CURRENT_THREAD;
     100uint32_t   cycle = (uint32_t)hal_get_cycles();
    55101if( DEBUG_KCM < cycle )
    56 printk("\n[%s] thread[%x,%x] enters for %s / page %x / count %d / active %d\n",
    57 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type),
    58 (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
    59 #endif
    60 
    61 assert( kcm_page->active , "kcm_page should be active" );
    62 
    63         // get first block available
    64         int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );
    65 
    66 assert( (index != -1) , "kcm_page should not be full" );
    67 
    68         // allocate block
    69         bitmap_clear( kcm_page->bitmap , index );
    70 
    71         // increase kcm_page count
    72         kcm_page->count ++;
    73 
    74         // change the kcm_page to busy if no more free block in page
    75         if( kcm_page->count >= kcm->blocks_nr )
    76         {
    77                 kcm_page->active = 0;
    78                 list_unlink( &kcm_page->list);
    79                 kcm->active_pages_nr --;
    80 
    81                 list_add_first( &kcm->busy_root , &kcm_page->list);
    82                 kcm->busy_pages_nr ++;
    83                 kcm_page->busy = 1;
    84         }
    85 
    86         // compute return pointer
    87         void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE
    88                      + (index * kcm->block_size) );
    89 
    90 #if DEBUG_KCM
    91 cycle = (uint32_t)hal_get_cycles();
    92 if( DEBUG_KCM < cycle )
    93 printk("\n[%s] thread[%x,%x] exit for %s / ptr %x / page %x / count %d\n",
    94 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type),
    95 (intptr_t)ptr, (intptr_t)kcm_page, kcm_page->count );
     102printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n",
     103__FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle );
    96104#endif
    97105
    98106        return ptr;
    99 }
    100 
    101 /////////////////////////////////////////////////////////////////////////////////////
    102 // This static function releases a previously allocated block.
    103 // It changes the kcm_page status if required.
    104 /////////////////////////////////////////////////////////////////////////////////////
    105 // @ kcm      : pointer on kcm allocator.
    106 // @ kcm_page : pointer on kcm_page.
    107 // @ ptr      : pointer on block to be released.
    108 /////////////////////////////////////////////////////////////////////////////////////
    109 static void kcm_put_block ( kcm_t      * kcm,
    110                             kcm_page_t * kcm_page,
    111                             void       * ptr )
    112 {
    113         uint32_t     index;
    114 
     107
     108}  // end kcm_get_block()
     109
     110/////////////////////////////////////////////////////////////////////////////////////
     111// This private static function must be called by a local thread.
     112// It releases a previously allocated block to the relevant kcm_page.
     113// It makes a panic if the released block is not allocated in this page.
     114// It changes the kcm_page status as required.
     115/////////////////////////////////////////////////////////////////////////////////////
     116// @ kcm        : pointer on kcm allocator.
     117// @ kcm_page   : pointer on kcm_page.
     118// @ block_ptr  : pointer on block to be released.
     119/////////////////////////////////////////////////////////////////////////////////////
     120static void __attribute__((noinline)) kcm_put_block ( kcm_t      * kcm,
     121                                                      kcm_page_t * kcm_page,
     122                                                      void       * block_ptr )
     123{
     124    // initialise variables
     125    uint32_t max    = kcm->max_blocks;
     126    uint32_t size   = 1 << kcm->order;
     127    uint32_t count  = kcm_page->count;
     128    uint64_t status = kcm_page->status;
     129   
    115130        // compute block index from block pointer
    116         index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;
    117 
    118 assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" );
    119 
    120 assert( (kcm_page->count > 0) , "count already zero" );
    121 
    122         bitmap_set( kcm_page->bitmap , index );
    123         kcm_page->count --;
    124 
    125         // change the page to active if it was busy
    126         if( kcm_page->busy )
    127         {
    128                 kcm_page->busy = 0;
     131        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     132
     133    // compute mask in bit vector
     134    uint64_t mask = ((uint64_t)0x1) << index;
     135
     136assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)",
     137GET_CXY(status), GET_PTR(status), GET_CXY(mask  ), GET_PTR(mask  ) );
     138
     139    // update status & count in kcm_page
     140        kcm_page->status = status & ~mask;
     141        kcm_page->count  = count - 1;
     142
     143        // change the page mode if page was full
     144        if( count == max )
     145        {
    129146                list_unlink( &kcm_page->list );
    130                 kcm->busy_pages_nr --;
     147                kcm->full_pages_nr --;
    131148
    132149                list_add_last( &kcm->active_root, &kcm_page->list );
    133150                kcm->active_pages_nr ++;
    134                 kcm_page->active = 1;
    135         }
    136 
    137         // change the kcm_page to free if last block in active page
    138         if( (kcm_page->active) && (kcm_page->count == 0) )
    139         {
    140                 kcm_page->active = 0;
    141                 list_unlink( &kcm_page->list);
    142                 kcm->active_pages_nr --;
    143 
    144                 list_add_first( &kcm->free_root , &kcm_page->list);
    145                 kcm->free_pages_nr ++;
    146         }
    147 }
    148 
    149 /////////////////////////////////////////////////////////////////////////////////////
    150 // This static function allocates one page from PPM. It initializes
    151 // the kcm_page descriptor, and introduces the new kcm_page into freelist.
    152 /////////////////////////////////////////////////////////////////////////////////////
    153 static error_t freelist_populate( kcm_t * kcm )
    154 {
    155         page_t     * page;
    156         kcm_page_t * kcm_page;
    157         kmem_req_t   req;
    158 
    159         // get one page from local PPM
    160         req.type  = KMEM_PAGE;
    161         req.size  = 0;
    162         req.flags = AF_KERNEL;
    163         page = kmem_alloc( &req );
    164 
    165         if( page == NULL )
    166         {
    167                 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    168             __FUNCTION__ , local_cxy );
    169                 return ENOMEM;
    170         }
    171 
    172         // get page base address
    173         xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
    174         kcm_page = (kcm_page_t *)GET_PTR( base_xp );
    175 
    176         // initialize KCM-page descriptor
    177         bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr );
    178 
    179         kcm_page->busy          = 0;
    180         kcm_page->active        = 0;
    181         kcm_page->count      = 0;
    182         kcm_page->kcm           = kcm;
    183         kcm_page->page          = page;
    184 
    185         // introduce new page in free-list
    186         list_add_first( &kcm->free_root , &kcm_page->list );
    187         kcm->free_pages_nr ++;
    188 
    189         return 0;
    190 }
    191 
    192 /////////////////////////////////////////////////////////////////////////////////////
    193 // This private function gets one KCM page from the KCM freelist.
    194 // It populates the freelist if required.
    195 /////////////////////////////////////////////////////////////////////////////////////
    196 static kcm_page_t * freelist_get( kcm_t * kcm )
    197 {
    198         error_t      error;
    199         kcm_page_t * kcm_page;
    200 
    201         // get a new page from PPM if freelist empty
    202         if( kcm->free_pages_nr == 0 )
    203         {
    204                 error = freelist_populate( kcm );
    205                 if( error ) return NULL;
    206         }
    207 
    208         // get first KCM page from freelist and unlink it
    209         kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
    210         list_unlink( &kcm_page->list );
    211         kcm->free_pages_nr --;
     151        }
     152
     153#if (DEBUG_KCM & 1)
     154thread_t * this  = CURRENT_THREAD;
     155uint32_t   cycle = (uint32_t)hal_get_cycles();
     156if( DEBUG_KCM < cycle )
     157printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n",
     158__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );
     159#endif
     160
     161}  // kcm_put_block()
     162
     163/////////////////////////////////////////////////////////////////////////////////////
     164// This private static function must be called by a local thread.
     165// It returns one non-full kcm_page with the following policy :
     166// - if the "active_list" is non empty, it returns the first "active" page,
     167//   without modifying the KCM state.
     168// - if the "active_list" is empty, it allocates a new page from PPM, inserts
     169//   this page in the active_list, and returns it.
     170/////////////////////////////////////////////////////////////////////////////////////
     171// @ kcm      : local pointer on local KCM allocator.
     172// @ return pointer on a non-full kcm page if success / returns NULL if no memory.
     173/////////////////////////////////////////////////////////////////////////////////////
     174static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm )
     175{
     176    kcm_page_t * kcm_page;
     177
     178    uint32_t active_pages_nr = kcm->active_pages_nr;
     179
     180    if( active_pages_nr > 0 )       // return first active page
     181    {
     182        kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     183    }
     184    else                            // allocate a new page from PPM
     185        {
     186        // get one 4 Kbytes page from local PPM
     187        page_t * page = ppm_alloc_pages( 0 );
     188
     189            if( page == NULL )
     190            {
     191                    printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     192                __FUNCTION__ , local_cxy );
     193
     194                    return NULL;
     195        }
     196
     197            // get page base address
     198            xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
     199
     200        // get local pointer on kcm_page
     201            kcm_page = GET_PTR( base_xp );
     202
     203            // initialize kcm_page descriptor
     204            kcm_page->status = 0;
     205            kcm_page->count  = 0;
     206            kcm_page->kcm    = kcm;
     207            kcm_page->page   = page;
     208
     209            // introduce new page in KCM active_list
     210            list_add_first( &kcm->active_root , &kcm_page->list );
     211            kcm->active_pages_nr ++;
     212        }
    212213
    213214        return kcm_page;
    214 }
     215
     216}  // end kcm_get_page()
    215217
    216218//////////////////////////////
    217219void kcm_init( kcm_t    * kcm,
    218                    uint32_t   type )
    219 {
    220 
    221 // the kcm_page descriptor must fit in the KCM slot
    222 assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , "KCM slot too small\n" );
    223 
    224 // the allocated object must fit in one single page
    225 assert( (kmem_type_size(type) <= (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE)),
    226 "allocated object requires more than one single page\n" );
     220                   uint32_t   order)
     221{
     222
     223assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" );
    227224
    228225        // initialize lock
    229         busylock_init( &kcm->lock , LOCK_KCM_STATE );
    230 
    231         // initialize KCM type
    232         kcm->type = type;
     226        remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE );
    233227
    234228        // initialize KCM page lists
    235         kcm->free_pages_nr   = 0;
    236         kcm->busy_pages_nr   = 0;
     229        kcm->full_pages_nr   = 0;
    237230        kcm->active_pages_nr = 0;
    238         list_root_init( &kcm->free_root );
    239         list_root_init( &kcm->busy_root );
     231        list_root_init( &kcm->full_root );
    240232        list_root_init( &kcm->active_root );
    241233
    242         // initialize block size
    243         uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE );
    244         kcm->block_size = block_size;
    245 
    246         // initialize number of blocks per page
    247         uint32_t  blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
    248         kcm->blocks_nr = blocks_nr;
    249 
     234        // initialize order and max_blocks
     235        kcm->order      = order;
     236    kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1;
     237 
    250238#if DEBUG_KCM
    251239thread_t * this  = CURRENT_THREAD;
    252240uint32_t   cycle = (uint32_t)hal_get_cycles();
    253241if( DEBUG_KCM < cycle )
    254 printk("\n[%s] thread[%x,%x] initialised KCM %s : block_size %d / blocks_nr %d\n",
    255 __FUNCTION__, this->process->pid, this->trdid,
    256 kmem_type_str( kcm->type ), block_size, blocks_nr );
    257 #endif
    258 
    259 }
     242printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n",
     243__FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks );
     244#endif
     245
     246}  // end kcm_init()
    260247
    261248///////////////////////////////
     
    263250{
    264251        kcm_page_t   * kcm_page;
    265         list_entry_t * iter;
     252
     253    // build extended pointer on  KCM lock
     254    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );
    266255
    267256        // get KCM lock
    268         busylock_acquire( &kcm->lock );
    269 
    270         // release all free pages
    271         LIST_FOREACH( &kcm->free_root , iter )
    272         {
    273                 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    274                 list_unlink( iter );
    275                 kcm->free_pages_nr --;
     257        remote_busylock_acquire( lock_xp );
     258
     259        // release all full pages
     260        while( list_is_empty( &kcm->full_root ) == false )
     261        {
     262                kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list );
     263                list_unlink( &kcm_page->list );
    276264                ppm_free_pages( kcm_page->page );
    277265        }
    278266
    279         // release all active pages
    280         LIST_FOREACH( &kcm->active_root , iter )
    281         {
    282                 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    283                 list_unlink( iter );
    284                 kcm->free_pages_nr --;
     267    // release all empty pages
     268    while( list_is_empty( &kcm->active_root ) == false )
     269        {
     270                kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     271                list_unlink( &kcm_page->list );
    285272                ppm_free_pages( kcm_page->page );
    286273        }
    287274
    288         // release all busy pages
    289         LIST_FOREACH( &kcm->busy_root , iter )
    290         {
    291                 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    292                 list_unlink( iter );
    293                 kcm->free_pages_nr --;
    294                 ppm_free_pages( kcm_page->page );
    295         }
    296 
    297275        // release KCM lock
    298         busylock_release( &kcm->lock );
     276        remote_busylock_release( lock_xp );
    299277}
    300278
    301 ///////////////////////////////
    302 void * kcm_alloc( kcm_t * kcm )
    303 {
     279//////////////////////////////////
     280void * kcm_alloc( uint32_t order )
     281{
     282    kcm_t      * kcm_ptr;
    304283        kcm_page_t * kcm_page;
    305         void       * ptr = NULL;   // pointer on block
     284        void       * block_ptr;
     285
     286    // min block size is 64 bytes
     287    if( order < 6 ) order = 6;
     288
     289assert( (order < 12) , "order = %d / must be less than 12" , order );
     290
     291    // get local pointer on relevant KCM allocator
     292    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
     293
     294    // build extended pointer on local KCM lock
     295    xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
     296
     297        // get KCM lock
     298        remote_busylock_acquire( lock_xp );
     299
     300    // get a non-full kcm_page
     301    kcm_page = kcm_get_page( kcm_ptr );
     302
     303    if( kcm_page == NULL )
     304        {
     305                remote_busylock_release( lock_xp );
     306                return NULL;
     307        }
     308
     309        // get a block from selected active page
     310        block_ptr = kcm_get_block( kcm_ptr , kcm_page );
     311
     312        // release lock
     313        remote_busylock_release( lock_xp );
     314
     315#if DEBUG_KCM
     316thread_t * this  = CURRENT_THREAD;
     317uint32_t   cycle = (uint32_t)hal_get_cycles();
     318if( DEBUG_KCM < cycle )
     319printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n",
     320__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr,
     321GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
     322#endif
     323
     324        return block_ptr;
     325
     326}  // end kcm_alloc()
     327
     328/////////////////////////////////
     329void kcm_free( void * block_ptr )
     330{
     331    kcm_t      * kcm_ptr;
     332        kcm_page_t * kcm_page;
     333
     334// check argument
     335assert( (block_ptr != NULL) , "block pointer cannot be NULL" );
     336
     337    // get local pointer on KCM page
     338        kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
     339
     340    // get local pointer on KCM descriptor
     341        kcm_ptr = kcm_page->kcm;
     342
     343#if DEBUG_KCM
     344thread_t * this  = CURRENT_THREAD;
     345uint32_t   cycle = (uint32_t)hal_get_cycles();
     346if( DEBUG_KCM < cycle )
     347printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n",
     348__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr,
     349GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
     350#endif
     351
     352    // build extended pointer on local KCM lock
     353    xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
    306354
    307355        // get lock
    308         busylock_acquire( &kcm->lock );
    309 
    310         // get an active page
    311         if( list_is_empty( &kcm->active_root ) )  // no active page => get one
    312         {
    313                 // get a page from free list
    314                 kcm_page = freelist_get( kcm );
    315 
    316                 if( kcm_page == NULL )
    317                 {
    318                         busylock_release( &kcm->lock );
    319                         return NULL;
    320                 }
    321 
    322                 // insert page in active list
    323                 list_add_first( &kcm->active_root , &kcm_page->list );
    324                 kcm->active_pages_nr ++;
    325                 kcm_page->active = 1;
    326         }
    327         else                                    // get first page from active list
    328         {
    329                 // get page pointer from active list
    330                 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     356        remote_busylock_acquire( lock_xp );
     357
     358        // release block
     359        kcm_put_block( kcm_ptr , kcm_page , block_ptr );
     360
     361        // release lock
     362        remote_busylock_release( lock_xp );
     363}
     364
     365/////////////////////////////////////////////////////////////////////////////////////
     366//        Remote access functions
     367/////////////////////////////////////////////////////////////////////////////////////
     368
     369/////////////////////////////////////////////////////////////////////////////////////
     370// This static function can be called by any thread running in any cluster.
     371// It returns a local pointer on a block allocated from a non-full kcm_page.
     372// It makes a panic if no block available in selected page.
     373// It changes the page status as required.
     374/////////////////////////////////////////////////////////////////////////////////////
     375// @ kcm_cxy  : remote KCM cluster identifier.
     376// @ kcm_ptr  : local pointer on remote KCM allocator.
     377// @ kcm_page : pointer on active kcm page to use.
     378// @ return a local pointer on the allocated block.
     379/////////////////////////////////////////////////////////////////////////////////////
     380static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t        kcm_cxy,
     381                                                              kcm_t      * kcm_ptr,
     382                                                              kcm_page_t * kcm_page )
     383{
     384    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
     385    uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
     386    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
     387    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
     388    uint32_t size   = 1 << order;
     389
     390assert( (count < max) , "kcm_page should not be full" );
     391
     392    uint32_t index  = 1;
     393    uint64_t mask   = (uint64_t)0x2;
     394    uint32_t found  = 0;
     395   
     396        // allocate first free block in kcm_page, update status,
     397    // and count , compute index of allocated block in kcm_page
     398    while( index <= max )
     399    {
     400        if( (status & mask) == 0 )   // block non allocated
     401        {
     402            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask );
     403            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count  ) , count + 1 );
     404            found  = 1;
     405            break;     
     406        }
     407       
     408        index++;
     409        mask <<= 1;
     410    }
     411
     412        // change the page list if almost full
     413        if( count == max-1 )
     414        {
     415                list_remote_unlink( kcm_cxy , &kcm_page->list );
     416                hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 );
     417
     418                list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list );
     419                hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 );
     420        }
     421
     422        // compute return pointer
     423        void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
     424
     425#if DEBUG_KCM_REMOTE
     426thread_t * this  = CURRENT_THREAD;
     427uint32_t   cycle = (uint32_t)hal_get_cycles();
     428if( DEBUG_KCM_REMOTE < cycle )
     429printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n",
     430__FUNCTION__, this->process->pid, this->trdid,
     431ptr, kcm_page, kcm_cxy, size, count + 1 );
     432#endif
     433
     434        return ptr;
     435
     436}  // end kcm_remote_get_block()
     437
     438/////////////////////////////////////////////////////////////////////////////////////
     439// This private static function can be called by any thread running in any cluster.
     440// It releases a previously allocated block to the relevant kcm_page.
     441// It changes the kcm_page status as required.
     442/////////////////////////////////////////////////////////////////////////////////////
     443// @ kcm_cxy   : remote KCM cluster identifier
     444// @ kcm_ptr   : local pointer on remote KCM.
     445// @ kcm_page  : local pointer on kcm_page.
     446// @ block_ptr : pointer on block to be released.
     447/////////////////////////////////////////////////////////////////////////////////////
     448static void __attribute__((noinline)) kcm_remote_put_block ( cxy_t        kcm_cxy,
     449                                                             kcm_t      * kcm_ptr,
     450                                                             kcm_page_t * kcm_page,
     451                                                             void       * block_ptr )
     452{
     453    uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
     454    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
     455    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
     456    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
     457    uint32_t size   = 1 << order;
     458   
     459        // compute block index from block pointer
     460        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     461
     462    // compute mask in bit vector
     463    uint64_t mask = 1 << index;
     464
     465assert( (status & mask) , "released page not allocated" );
     466
     467    // update status & count in kcm_page
     468        hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status & ~mask );
     469        hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count - 1 );
     470
     471        // change the page list if page was full
     472        if( count == max )
     473        {
     474                list_remote_unlink( kcm_cxy , &kcm_page->list );
     475                hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , -1 );
     476
     477                list_remote_add_last( kcm_cxy , &kcm_ptr->active_root, &kcm_page->list );
     478                hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 );
     479        }
     480
     481#if (DEBUG_KCM_REMOTE & 1)
     482thread_t * this  = CURRENT_THREAD;
     483uint32_t   cycle = (uint32_t)hal_get_cycles();
     484if( DEBUG_KCM_REMOTE < cycle )
     485printk("\n[%s] thread[%x,%x] released block %x in page %x / cluster %x / size %x / count %d\n",
     486__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )
     487#endif
     488
     489}  // end kcm_remote_put_block()
     490
     491/////////////////////////////////////////////////////////////////////////////////////
     492// This private static function can be called by any thread running in any cluster.
     493// It gets one non-full KCM page from the remote KCM.
// If no active (non-full) page exists, it allocates a new page from the remote
// PPM and initialises its kcm_page descriptor before linking it in.
     496/////////////////////////////////////////////////////////////////////////////////////
static kcm_page_t * __attribute__((noinline)) kcm_remote_get_page( cxy_t    kcm_cxy,
                                                                   kcm_t  * kcm_ptr )
{
    kcm_page_t * kcm_page;    // local pointer on remote KCM page

    // read number of non-full pages in the remote KCM
    // NOTE(review): no lock is taken here — presumably the caller already holds
    // the remote KCM busylock (as kcm_remote_alloc does); confirm for new callers
    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );

    if( active_pages_nr > 0 )       // return first active page
    {
        kcm_page = LIST_REMOTE_FIRST( kcm_cxy , &kcm_ptr->active_root , kcm_page_t , list );
    }
    else                            // allocate a new page from PPM
	{
        // get one 4 Kbytes page from remote PPM
        page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 );

	    if( page == NULL )
	    {
		    printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
	        __FUNCTION__ , kcm_cxy );

		    return NULL;
        }

	    // get remote page base address
	    xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) );

        // get local pointer on kcm_page
        // (the kcm_page descriptor lives at the base of the PPM page itself)
	    kcm_page = GET_PTR( base_xp );

	    // initialize kcm_page descriptor :
        // no allocated block yet (count = 0, empty status bitmap),
        // and back-links to the owning KCM and to the PPM page descriptor
	    hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count )  , 0 );
	    hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 );
	    hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm )    , kcm_ptr );
	    hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , page );

	    // introduce new page in remote KCM active_list
	    list_remote_add_first( kcm_cxy , &kcm_ptr->active_root , &kcm_page->list );
	    hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 );
	}

	return kcm_page;

}  // end kcm_remote_get_page()
     541
     542/////////////////////////////////////////
     543void * kcm_remote_alloc( cxy_t    kcm_cxy,
     544                         uint32_t order )
     545{
     546    kcm_t      * kcm_ptr;
     547    kcm_page_t * kcm_page;
     548    void       * block_ptr;
     549
     550    if( order < 6 ) order = 6;
     551
     552assert( (order < 12) , "order = %d / must be less than 12" , order );
     553
     554    // get local pointer on relevant KCM allocator
     555    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
     556
     557    // build extended pointer on remote KCM lock
     558    xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock );
     559
     560        // get lock
     561        remote_busylock_acquire( lock_xp );
     562
     563    // get a non-full kcm_page
     564    kcm_page = kcm_remote_get_page( kcm_cxy , kcm_ptr );
     565
     566    if( kcm_page == NULL )
     567        {
     568                remote_busylock_release( lock_xp );
     569                return NULL;
    331570        }
    332571
    333572        // get a block from selected active page
    334         // cannot fail, as an active page cannot be full...
    335         ptr  = kcm_get_block( kcm , kcm_page );
     573        block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page );
    336574
    337575        // release lock
    338         busylock_release( &kcm->lock );
    339 
    340         return ptr;
     576        remote_busylock_release( lock_xp );
     577
     578#if DEBUG_KCM_REMOTE
     579thread_t * this  = CURRENT_THREAD;
     580uint32_t   cycle = (uint32_t)hal_get_cycles();
     581if( DEBUG_KCM_REMOTE < cycle )
     582printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",
     583__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
     584#endif
     585
     586        return block_ptr;
     587
     588}  // end kcm_remote_alloc()
     589
     590/////////////////////////////////////
     591void kcm_remote_free( cxy_t  kcm_cxy,
     592                      void * block_ptr )
     593{
     594        kcm_t      * kcm_ptr;
     595        kcm_page_t * kcm_page;
     596
     597// check argument
     598assert( (block_ptr != NULL) , "block pointer cannot be NULL" );
     599
     600    // get local pointer on remote KCM page
     601        kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
     602
     603    // get local pointer on remote KCM
     604        kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) );
     605
     606    // build extended pointer on remote KCM lock
     607    xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock );
     608
     609        // get lock
     610        remote_busylock_acquire( lock_xp );
     611
     612        // release block
     613        kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr );
     614
     615        // release lock
     616        remote_busylock_release( lock_xp );
     617
     618#if DEBUG_KCM_REMOTE
     619thread_t * this  = CURRENT_THREAD;
     620uint32_t   cycle = (uint32_t)hal_get_cycles();
     621uint32_t   order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
     622if( DEBUG_KCM_REMOTE < cycle )
     623printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",
     624__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
     625#endif
     626
     627}  // end kcm_remote_free
     628
     629/////////////////////////////////////////
     630void kcm_remote_display( cxy_t   kcm_cxy,
     631                         kcm_t * kcm_ptr )
     632{
     633    uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
     634    uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
     635    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );
     636
     637        printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages %d\n",
     638        kcm_cxy, order, full_pages_nr, active_pages_nr );
    341639}
    342 
    343 ///////////////////////////
    344 void kcm_free( void * ptr )
    345 {
    346         kcm_page_t * kcm_page;
    347         kcm_t      * kcm;
    348 
    349 // check argument
    350 assert( (ptr != NULL) , "pointer cannot be NULL" );
    351 
    352         kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
    353         kcm      = kcm_page->kcm;
    354 
    355         // get lock
    356         busylock_acquire( &kcm->lock );
    357 
    358         // release block
    359         kcm_put_block( kcm , kcm_page , ptr );
    360 
    361         // release lock
    362         busylock_release( &kcm->lock );
    363 }
    364 
    365 ////////////////////////////
    366 void kcm_print (kcm_t * kcm)
    367 {
    368         printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",
    369                kmem_type_str( kcm->type ) ,
    370                kcm->free_pages_nr ,
    371                kcm->busy_pages_nr ,
    372                kcm->active_pages_nr );
    373 }
Note: See TracChangeset for help on using the changeset viewer.