Changeset 50 for trunk/kernel/mm


Ignore:
Timestamp:
Jun 26, 2017, 3:15:11 PM (7 years ago)
Author:
alain
Message:

bloup

Location:
trunk/kernel/mm
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.c

    r20 r50  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner    (2016)
     5 *         Alain Greiner    (2016,2017)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4141// It changes the page status if required.
    4242//////////////////////////////////////////////////////////////////////////////////////
    43 // @ kcm   : pointer on kcm allocator.
    44 // @ ptr  : pointer on active kcm page to use.
     43// @ kcm      : pointer on kcm allocator.
     44// @ kcm_page : pointer on active kcm page to use.
    4545/////////////////////////////////////////////////////////////////////////////////////
    4646static void * kcm_get_block( kcm_t      * kcm,
    47                              kcm_page_t * page )
    48 {
    49         assert( page->active , __FUNCTION__ , "kcm page should be active" );
     47                             kcm_page_t * kcm_page )
     48{
     49        kcm_dmsg("\n[INFO] %s : enters for %s / page %x / count = %d / active = %d\n",
     50                 __FUNCTION__ , kmem_type_str( kcm->type ) ,
     51             (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     52
     53        assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );
    5054
    5155        // get first block available
    52         int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );
    53 
    54         assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );
     56        int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );
     57
     58        assert( (index != -1) , __FUNCTION__ , "kcm_page should not be full" );
    5559
    5660        // allocate block
    57         bitmap_clear( page->bitmap , index );
    58 
    59         // increase page refcount
    60         page->refcount ++;
    61 
    62         // change the page to busy no more free block in page
    63         if( page->refcount >= kcm->blocks_nr )
    64         {
    65                 page->active = 0;
    66                 list_unlink( &page->list);
     61        bitmap_clear( kcm_page->bitmap , index );
     62
     63        // increase kcm_page count
     64        kcm_page->count ++;
     65
     66        // change the kcm_page to busy if no more free block in page
     67        if( kcm_page->count >= kcm->blocks_nr )
     68        {
     69        kcm_page->active = 0;
     70                list_unlink( &kcm_page->list);
    6771                kcm->active_pages_nr --;
    6872
    69                 list_add_first( &kcm->busy_root , &page->list);
     73                list_add_first( &kcm->busy_root , &kcm_page->list);
    7074                kcm->busy_pages_nr ++;
    71                 page->busy   = 1;
    72         }
    73 
    74         return (page->base + index * kcm->block_size );
     75                kcm_page->busy = 1;
     76        }
     77
     78    // compute return pointer
     79    void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE
     80                 + (index * kcm->block_size) );
     81
     82        kcm_dmsg("\n[INFO] %s : allocated one block  %s / ptr = %x / page = %x / count = %d\n",
     83                 __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr ,
     84             (intptr_t)kcm_page , kcm_page->count );
     85
     86        return ptr;
    7587
    7688}  // kcm_get_block()
     
    7890/////////////////////////////////////////////////////////////////////////////////////
    7991// This static function releases a previously allocated block.
    80 // It changes the page status if required.
     92// It changes the kcm_page status if required.
    8193/////////////////////////////////////////////////////////////////////////////////////
    8294// @ kcm   : pointer on kcm allocator.
     
    8698                            void  * ptr )
    8799{
    88         kcm_page_t * page;
     100        kcm_page_t * kcm_page;
    89101        uint32_t     index;
    90102
    91         page = (kcm_page_t*)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    92         index = ((uint8_t*)ptr - page->base) / kcm->block_size;
    93 
    94         bitmap_set( page->bitmap , index );
    95         page->refcount --;
     103    // compute pointer on kcm_page from block pointer
     104        kcm_page = (kcm_page_t*)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
     105
     106    // compute block index from block pointer
     107        index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;
     108
     109        bitmap_set( kcm_page->bitmap , index );
     110        kcm_page->count --;
    96111
    97112        // change the page to active if it was busy
    98         if( page->busy )
    99         {
    100                 page->busy = 0;
    101                 list_unlink( &page->list );
     113        if( kcm_page->busy )
     114        {
     115                kcm_page->busy = 0;
     116                list_unlink( &kcm_page->list );
    102117                kcm->busy_pages_nr --;
    103118
    104                 list_add_last( &kcm->active_root, &page->list );
     119                list_add_last( &kcm->active_root, &kcm_page->list );
    105120                kcm->active_pages_nr ++;
    106                 page->active = 1;
    107         }
    108 
    109         // change the page to free if last block in active page
    110         if( (page->active) && (page->refcount == 0) )
    111         {
    112                 page->active = 0;
    113                 list_unlink( &page->list);
     121                kcm_page->active = 1;
     122        }
     123
     124        // change the kcm_page to free if last block in active page
     125        if( (kcm_page->active) && (kcm_page->count == 0) )
     126        {
     127                kcm_page->active = 0;
     128                list_unlink( &kcm_page->list);
    114129                kcm->active_pages_nr --;
    115130
    116                 list_add_first( &kcm->free_root , &page->list);
     131                list_add_first( &kcm->free_root , &kcm_page->list);
    117132                kcm->free_pages_nr ++;
    118133        }
     
    121136/////////////////////////////////////////////////////////////////////////////////////
    122137// This static function allocates one page from PPM. It initializes
    123 // the KCM-page descriptor, and introduces the new page into freelist.
     138// the kcm_page descriptor, and introduces the new kcm_page into freelist.
    124139/////////////////////////////////////////////////////////////////////////////////////
    125140static error_t freelist_populate( kcm_t * kcm )
    126141{
    127142        page_t     * page;
    128         kcm_page_t * ptr;
     143        kcm_page_t * kcm_page;
    129144        kmem_req_t   req;
    130145
     
    143158
    144159        // get page base address
    145         ptr = ppm_page2base( page );
     160        kcm_page = (kcm_page_t *)ppm_page2base( page );
    146161
    147162        // initialize KCM-page descriptor
    148         bitmap_set_range( ptr->bitmap , 0 , kcm->blocks_nr );
    149 
    150         ptr->busy          = 0;
    151         ptr->active        = 0;
    152         ptr->refcount      = 0;
    153         ptr->base          = (uint8_t*)ptr + kcm->block_size;
    154         ptr->kcm           = kcm;
    155         ptr->page          = page;
     163        bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr );
     164
     165        kcm_page->busy          = 0;
     166        kcm_page->active        = 0;
     167        kcm_page->count      = 0;
     168        kcm_page->kcm           = kcm;
     169        kcm_page->page          = page;
    156170
    157171        // introduce new page in free-list
    158         list_add_first( &kcm->free_root , &ptr->list );
     172        list_add_first( &kcm->free_root , &kcm_page->list );
    159173        kcm->free_pages_nr ++;
    160174
     
    170184{
    171185        error_t      error;
    172         kcm_page_t * page;
     186        kcm_page_t * kcm_page;
    173187
    174188        // get a new page from PPM if freelist empty
     
    179193        }
    180194
    181         // get first KCM page from freelist and change its status to active
    182         page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
    183         list_unlink( &page->list );
     195        // get first KCM page from freelist and unlink it
     196        kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
     197        list_unlink( &kcm_page->list );
    184198        kcm->free_pages_nr --;
    185199
    186         return page;
     200        return kcm_page;
    187201
    188202} // freelist_get()
     
    193207                   uint32_t   type )
    194208{
    195         uint32_t     blocks_nr;
    196         uint32_t     block_size;
    197         uint32_t     remaining;
     209    // the kcm_page descriptor must fit in the KCM slot
     210    assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) ,
     211             __FUNCTION__ , "KCM slot too small\n" );
    198212
    199213        // initialize lock
     
    211225        list_root_init( &kcm->active_root );
    212226
    213         // initialize block size and number of blocks per page
    214         block_size      = ARROUND_UP( kmem_type_size( type ) , 64 );
    215         blocks_nr       = CONFIG_PPM_PAGE_SIZE / block_size;
    216         remaining       = CONFIG_PPM_PAGE_SIZE % block_size;
    217         blocks_nr       = (remaining >= sizeof(kcm_page_t)) ? blocks_nr : blocks_nr - 1;
    218 
    219         kcm->blocks_nr  = blocks_nr;
     227        // initialize block size
     228        uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE );
    220229        kcm->block_size = block_size;
    221230
     231        // initialize number of blocks per page
     232        uint32_t  blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
     233    kcm->blocks_nr = blocks_nr;
     234
    222235        kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
    223                  __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );
     236                 __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );
    224237
    225238}  // kcm_init()
     
    228241void kcm_destroy( kcm_t * kcm )
    229242{
    230         kcm_page_t   * page;
     243        kcm_page_t   * kcm_page;
    231244        list_entry_t * iter;
    232245
     
    237250        LIST_FOREACH( &kcm->free_root , iter )
    238251        {
    239                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     252                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    240253                list_unlink( iter );
    241254                kcm->free_pages_nr --;
    242                 ppm_free_pages( page->page );
     255                ppm_free_pages( kcm_page->page );
    243256        }
    244257
     
    246259        LIST_FOREACH( &kcm->active_root , iter )
    247260        {
    248                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     261                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    249262                list_unlink( iter );
    250263                kcm->free_pages_nr --;
    251                 ppm_free_pages( page->page );
     264                ppm_free_pages( kcm_page->page );
    252265        }
    253266
     
    255268        LIST_FOREACH( &kcm->busy_root , iter )
    256269        {
    257                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     270                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    258271                list_unlink( iter );
    259272                kcm->free_pages_nr --;
    260                 ppm_free_pages( page->page );
     273                ppm_free_pages( kcm_page->page );
    261274        }
    262275
     
    269282void * kcm_alloc( kcm_t * kcm )
    270283{
    271         kcm_page_t * page;
     284        kcm_page_t * kcm_page;
    272285        void       * ptr = NULL;   // pointer on block
    273286
     
    278291        if( list_is_empty( &kcm->active_root ) )  // no active page => get one
    279292        {
    280                 kcm_dmsg("\n[INFO] %s : enters for type %s but no active page => get one\n",
    281                          __FUNCTION__ , kmem_type_str( kcm->type ) );
    282 
    283293                // get a page from free list
    284                 page = freelist_get( kcm );
    285                 if( page == NULL ) return NULL;
     294                kcm_page = freelist_get( kcm );
     295
     296                if( kcm_page == NULL ) return NULL;
    286297
    287298                // insert page in active list
    288                 list_add_first( &kcm->active_root , &page->list );
     299                list_add_first( &kcm->active_root , &kcm_page->list );
    289300                kcm->active_pages_nr ++;
    290                 page->active = 1;
    291         }
    292         else                     // get first page from active list
    293         {
    294                 kcm_dmsg("\n[INFO] %s : enters for type %s with an active page\n",
    295                          __FUNCTION__ , kmem_type_str( kcm->type ) );
    296 
     301            kcm_page->active = 1;
     302
     303        kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
     304                         __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() ,
     305                 (intptr_t)kcm_page , kcm_page->count );
     306
     307        }
     308        else                                    // get first page from active list
     309        {
    297310                // get page pointer from active list
    298                 page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     311                kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     312
     313                kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
     314                         __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() , 
     315                 (intptr_t)kcm_page , kcm_page->count );
    299316        }
    300317
    301318        // get a block from selected active page
    302319        // cannot fail, as an active page cannot be full...
    303         ptr  = kcm_get_block( kcm , page );
     320        ptr  = kcm_get_block( kcm , kcm_page );
    304321
    305322        // release lock
    306         spinlock_unlock(&kcm->lock);
    307 
    308         kcm_dmsg("\n[INFO] %s : allocated one block of type %s / ptr = %x\n",
    309                  __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr );
     323        spinlock_unlock( &kcm->lock );
    310324
    311325        return ptr;
    312326
    313 }  // kcm_alloc()
     327}  // end kcm_alloc()
    314328
    315329///////////////////////////
    316330void kcm_free( void * ptr )
    317331{
    318         kcm_page_t * page;
     332        kcm_page_t * kcm_page;
    319333        kcm_t      * kcm;
    320334
    321         if( ptr == NULL ) return;
    322 
    323         page = (kcm_page_t *)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    324         kcm  = page->kcm;
     335        assert( (ptr != NULL) , __FUNCTION__ , "pointer cannot be NULL" );
     336
     337        kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
     338        kcm      = kcm_page->kcm;
    325339
    326340        // get lock
     
    332346        // release lock
    333347        spinlock_unlock( &kcm->lock );
    334 }
     348
     349}  // end kcm_free()
    335350
    336351////////////////////////////
  • trunk/kernel/mm/kcm.h

    r23 r50  
    3636 * This structure defines a generic Kernel Cache Manager, that is a block allocator,
    3737 * for fixed size objects. It exists a specific KCM allocator for each object type.
    38  * The actual allocated block size is the smallest multiple of 64 bytes that can
    39  * contain one single object.
     38 * The actual allocated block size is the smallest multiple of the KCM slot, that
     39 * contain one single object. The KCM slot is typically 64 bytes, as it must be large
     40 * enough to store the kcm_page descriptor, defined below.
    4041 * The various KCM allocators themselves are not statically allocated in the cluster
    4142 * manager, but are dynamically allocated when required, using the embedded KCM
     
    4647{
    4748        spinlock_t           lock;             /*! protect exclusive access to allocator   */
    48         uint32_t             block_size;       /*! actual block size (bytes)               */
    49         uint32_t             blocks_nr;        /*! number of blocks per page               */
     49        uint32_t             block_size;       /*! rounded block size (bytes)              */
     50        uint32_t             blocks_nr;        /*! max number of blocks per page           */
    5051
    5152        list_entry_t         active_root;      /*! root of active pages list               */
     
    6465/****************************************************************************************
    6566 * This structure defines a KCM-page descriptor.
    66  * A KCM-page can contain up to (CONFIG_PPM_PAGE_SIZE / CONFIG_CACHE_LINE_SIZE) blocks.
     67 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) blocks.
    6768 * This kcm page descriptor is stored in the first slot of the page.
    6869 ***************************************************************************************/
     
    7071typedef struct kcm_page_s
    7172{
    72         uint32_t        bitmap[BITMAP_SIZE(CONFIG_KCM_BLOCKS_MAX)];
    73         uint8_t       * base;                  /*! pointer on first block in page          */
    74         kcm_t         * kcm;                   /*! owner KCM allocator                     */
     73        uint32_t        bitmap[2];             /*! at most 64 blocks in a single page      */
    7574        list_entry_t    list;                  /*! [active / busy / free] list member      */
     75    kcm_t         * kcm;                   /*! pointer on kcm allocator                */ 
    7676        page_t        * page;                  /*! pointer on the physical page descriptor */
    77         uint8_t         refcount;              /*! number of allocated blocks              */
    78         uint8_t         busy;                  /*! page busy if non zero                   */
    79         uint8_t         active;                /*! page active if non zero                 */
    80         uint8_t         unused;                /*!                                         */
     77        uint32_t        count;                 /*! number of allocated blocks              */
     78        uint32_t        busy;                  /*! page busy if non zero                   */
     79        uint32_t        active;                /*! page active if non zero                 */
    8180}
    8281kcm_page_t;
  • trunk/kernel/mm/kmem.c

    r23 r50  
    103103    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
    104104    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
     105
     106    else if( type == KMEM_512_BYTES )     return 512;
     107
    105108    else                                  return 0;
    106109}
     
    130133    else if( type == KMEM_SEM )           return "KMEM_SEM";
    131134    else if( type == KMEM_SEM )           return "KMEM_CONDVAR";
     135
     136    else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
     137
    132138    else                                  return "undefined";
    133139}
     
    193199        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    194200
    195         kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s / size %d\n",
    196                       __FUNCTION__ , local_cxy , kmem_type_str( type ) , size );
     201        kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s\n",
     202                      __FUNCTION__ , local_cxy , kmem_type_str( type ) );
    197203
    198204    // analyse request type
     
    202208                ptr = (void *)ppm_alloc_pages( size );
    203209
    204         // reset page if required
     210        // reset page if requested
    205211                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
    206212
     
    217223                if( flags & AF_ZERO ) memset( ptr , 0 , size );
    218224
    219         kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
    220                   __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
     225        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
     226                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
     227                 (intptr_t)ptr , req->size );
    221228        }
    222229    else                                           // KCM allocator
     
    237244                if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
    238245
    239         kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
    240                   __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
     246        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
     247                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
     248                  (intptr_t)ptr , kmem_type_size( type ) );
    241249        }
    242250
  • trunk/kernel/mm/kmem.h

    r23 r50  
    5959  KMEM_CONDVAR          = 19,  /*! remote_condvar_t                                 */
    6060
    61   KMEM_TYPES_NR         = 19,
     61  KMEM_512_BYTES        = 20,  /*! 512 bytes aligned                                */
     62 
     63  KMEM_TYPES_NR         = 21,
    6264};
    6365
  • trunk/kernel/mm/ppm.c

    r18 r50  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     5 *          Alain Greiner    (2016,2017)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4848}
    4949
    50 ////////////////////////////////////////////
    51 inline void * ppm_page2base( page_t * page )
     50
     51
     52/////////////////////////////////////////////
     53inline void * ppm_page2vaddr( page_t * page )
    5254{
    5355        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    54         return (void*)((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
    55 }
    56 
    57 ////////////////////////////////////////////
    58 inline page_t * ppm_base2page( void * base )
     56        return ppm->vaddr_base + ((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
     57}
     58
     59//////////////////////////////////////////////
     60inline page_t * ppm_vaddr2page( void * vaddr )
    5961{
    6062        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    61         return (ppm->pages_tbl + (((uint32_t)base ) >> CONFIG_PPM_PAGE_SHIFT));
    62 }
     63        return ppm->pages_tbl + (vaddr - ppm->vaddr_base);
     64}
     65
     66
    6367
    6468//////////////////////////////////////////
     
    7680}
    7781
     82
     83
    7884///////////////////////////////////////
    79 inline void * ppm_ppn2base( ppn_t ppn )
    80 {
    81         return (void*)( ppn << CONFIG_PPM_PAGE_SHIFT );
    82 }
    83 
    84 ////////////////////////////////////////
    85 inline ppn_t ppm_base2ppn( void * base )
    86 {
    87         return (ppn_t)( (uint32_t)base >> CONFIG_PPM_PAGE_SHIFT );
    88 }
    89 
    90 //////////////////////////////////////////////////
    91 static void ppm_free_pages_nolock( page_t * page )
     85inline void * ppm_ppn2vaddr( ppn_t ppn )
     86{
     87        ppm_t  * ppm  = &LOCAL_CLUSTER->ppm;
     88        return ppm->vaddr_base + (ppn << CONFIG_PPM_PAGE_SHIFT);
     89}
     90
     91//////////////////////////////////////////
     92inline ppn_t ppm_vaddr2ppn( void * vaddr )
     93{
     94        ppm_t  * ppm  = &LOCAL_CLUSTER->ppm;
     95        return  ( (ppm->vaddr_base - vaddr) >> CONFIG_PPM_PAGE_SHIFT );
     96}
     97
     98
     99
     100///////////////////////////////////////////
     101void ppm_free_pages_nolock( page_t * page )
    92102{
    93103        page_t   * buddy;            // searched buddy page descriptor
     
    95105        page_t   * current;          // current (merged) page descriptor
    96106        uint32_t   current_index;    // current (merged) page index
    97         uint32_t   current_order;    // current (merget) page order
     107        uint32_t   current_order;    // current (merged) page order
    98108
    99109    ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
     
    120130                list_unlink( &buddy->list );
    121131                ppm->free_pages_nr[current_order] --;
    122         ppm->total_free_pages -= (1 << current_order);
    123132
    124133        // merge buddy with current
     
    134143        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    135144        ppm->free_pages_nr[current_order] ++;
    136     ppm->total_free_pages += (1 << current_order);
    137145
    138146}  // end ppm_free_pages_nolock()
    139 
    140 //////////////////////////////
    141 void ppm_init( ppm_t    * ppm,
    142                uint32_t   pages_nr,        // total pages number
    143                uint32_t   pages_offset )   // occupied pages
    144 {
    145         uint32_t   i;
    146 
    147     // set signature
    148         ppm->signature = PPM_SIGNATURE;
    149 
    150     // initialize lock protecting the free_pages[] array
    151         spinlock_init( &ppm->free_lock );
    152 
    153     // initialize free_pages[] array as empty
    154         ppm->total_free_pages = 0;
    155         for( i = 0 ; i < CONFIG_PPM_MAX_ORDER ; i++ )
    156         {
    157                 list_root_init( &ppm->free_pages_root[i] );
    158                 ppm->free_pages_nr[i] = 0;
    159         }
    160 
    161     // initialize dirty_list as empty
    162     list_root_init( &ppm->dirty_root );
    163 
    164     // initialize pointer on page descriptors array
    165         ppm->pages_tbl = (page_t*)( pages_offset << CONFIG_PPM_PAGE_SHIFT );
    166 
    167     // compute size of pages descriptor array rounded to an integer number of pages
    168     uint32_t bytes = ARROUND_UP( pages_nr * sizeof(page_t), CONFIG_PPM_PAGE_SIZE );
    169 
    170     // compute number of pages required to store page descriptor array
    171         uint32_t pages_array  = bytes >> CONFIG_PPM_PAGE_SHIFT;
    172 
    173     // compute total number of reserved pages (kernel code & pages_tbl[])
    174         uint32_t reserved_pages = pages_offset + pages_array;
    175 
    176         // set pages numbers
    177         ppm->pages_nr      = pages_nr;
    178     ppm->pages_offset  = reserved_pages;
    179 
    180     // initialises all page descriptors in pages_tbl[]
    181         for( i = 0 ; i < pages_nr ; i++ )
    182     {
    183         page_init( &ppm->pages_tbl[i] );
    184 
    185         // TODO optimisation : make only a partial init [AG]
    186         // complete the initialisation when page is allocated [AG]
    187         // ppm->pages_tbl[i].flags = 0;
    188     }
    189 
    190     // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
    191     // - release all other pages to populate the free lists
    192         for( i = 0 ; i < reserved_pages ; i++)
    193     {
    194         page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
    195     }
    196         for( i = reserved_pages ; i < pages_nr ; i++ )
    197         {
    198             ppm_free_pages_nolock( &ppm->pages_tbl[i] );
    199 
    200         // TODO optimisation : decompose this enormous set of small pages
    201         // to a set big pages with various order values
    202         }
    203 
    204     // check consistency
    205     ppm_assert_order( ppm );
    206 
    207 } // end ppm_init()
    208147
    209148////////////////////////////////////////////
     
    216155    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    217156
    218         assert( (ppm->signature == PPM_SIGNATURE) , __FUNCTION__ , "PPM non initialised" );
    219 
    220157        assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );
    221158
     
    224161    ppm_dmsg("\n[INFO] %s : enters / order = %d\n",
    225162             __FUNCTION__ , order );
    226 
    227 #if( CONFIG_PPM_DEBUG )
    228 ppm_print( ppm , "before allocation" );
    229 #endif
    230163
    231164    // take lock protecting free lists
     
    252185
    253186    // update free-lists after removing a block
    254         ppm->total_free_pages -= (1 << current_order);
    255187        ppm->free_pages_nr[current_order] --;
    256188        current_size = (1 << current_order);
     
    268200                list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
    269201                ppm->free_pages_nr[current_order] ++;
    270         ppm->total_free_pages += (1 << current_order);
    271202        }
    272203
     
    282213             __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
    283214
    284 #if CONFIG_PPM_DEBUG
    285 ppm_print( ppm , "after allocation" );
    286 #endif
    287 
    288215        return block;
    289216}  // end pmm_alloc-pages()
     
    315242        spinlock_lock( &ppm->free_lock );
    316243
    317         printk("\n***  PPM state in cluster %x %s : pages = %d / offset = %d / free = %d ***\n",
    318                local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
     244        printk("\n***  PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
     245               local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );
    319246
    320247        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
  • trunk/kernel/mm/ppm.h

    r18 r50  
    3232#include <page.h>
    3333
    34 #define  PPM_SIGNATURE     0xBABEF00D
    3534
    3635/*****************************************************************************************
    3736 * This structure defines the Physical Memory Manager in a cluster.
    38  * In all clusters, the physical memory bank starts at address 0.
    39  * The segments kcode and kdata are mapped in the first "offset" pages.
    40  * The physical page descriptors array is implemented just after this offset zone.
    41  * The main service provided by the PMM is the dynamic allocation of physical pages.
     37 * In all clusters, the physical memory bank starts at local physical address 0.
     38 * The size of this local physical memory is defined by the <pages_nr> field in the
     39 * boot_info structure. It is split in three parts:
     40 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
     41 *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
     42 *   boot_info structure.
     43 * - the "pages_tbl" section contains the physical page descriptors array. It starts
     44 *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
     45 *   It is created and initialized by the hal_ppm_create() function.
     46 * - The "kernel_heap" section contains all physical pages that are not in the
     47 *   kernel_code and pages_tbl sections, and that have not been reserved by the
     48 *   architecture specific bootloader. The reserved pages are defined in the boot_info
     49 *   structure.
     50 *
     51 * The main service provided by the PMM is the dynamic allocation of physical pages
     52 * from the "kernel_heap" section.
    4253 * This low-level allocator implements the buddy algorithm: an allocated block is
    43  * is an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
     54 * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
    4455 ****************************************************************************************/
     56
    4557typedef struct ppm_s
    4658{
    47         uint32_t       signature;               /*! set when initialised                    */
    48         spinlock_t     free_lock;               /*! lock protecting free_pages[] array      */
     59        spinlock_t     free_lock;               /*! lock protecting free_pages[] lists      */
    4960        list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists      */
    5061        uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages    */
    51     uint32_t       total_free_pages;        /*! total number of free pages              */
    5262        page_t       * pages_tbl;               /*! pointer on page descriptors array       */
    53         uint32_t       pages_nr;                /*! total number of 4 Kbytes physical page  */
    54     uint32_t       pages_offset;            /*! allocated pages for kcode & kdata       */
    55     uint32_t       pages_desc;              /*! allocated pages for pages_tbl[] array   */
    56     spinlock_t     dirty_lock;              /*! lock protecting the dirty list          */
      63        uint32_t       pages_nr;                /*! total number of small physical pages    */
     64    spinlock_t     dirty_lock;              /*! lock protecting the dirty pages list    */
    5765    list_entry_t   dirty_root;              /*! root of dirty pages list                */
     66    void         * vaddr_base;              /*! pointer on local physical memory base   */
    5867}
    5968ppm_t;
     
    8089 * @ order        : ln2( number of 4 Kbytes pages)
    8190 * @ returns a pointer on the page descriptor if success / NULL otherwise
    82  ****************************************************************************************/
      91 ****************************************************************************************/
    8392page_t * ppm_alloc_pages( uint32_t order );
    8493
     
    93102
    94103/*****************************************************************************************
    95  * This function check if a page descriptor is valid.
      104 * This function checks if a page descriptor pointer is valid.
    96105 *****************************************************************************************
    97106 * @ page         : pointer on a page descriptor
     
    101110
    102111/*****************************************************************************************
    103  * Get the page base address from the page descriptor pointer.
     112 * Get the page virtual address from the page descriptor pointer.
    104113 *****************************************************************************************
    105114 * @ page         : pointer to page descriptor
    106  * @ returns page base address
     115 * @ returns virtual address of page itself.
    107116 ****************************************************************************************/
    108 inline void* ppm_page2base( page_t * page );
     117inline void* ppm_page2vaddr( page_t * page );
    109118
    110119/*****************************************************************************************
    111  * Get the page descriptor pointer from the page base address.
     120 * Get the page descriptor pointer from the page virtual address.
    112121 *****************************************************************************************
    113  * @ vaddr        : page base address
     122 * @ vaddr        : page virtual address
    114123 * @ returns pointer on page descriptor
    115124 ****************************************************************************************/
    116 inline page_t * ppm_base2page( void * vaddr );
     125inline page_t * ppm_vaddr2page( void * vaddr );
    117126
    118127/*****************************************************************************************
     
    133142
    134143/*****************************************************************************************
    135  * Get the page base address from the PPN.
     144 * Get the page virtual address from the PPN.
    136145 *****************************************************************************************
    137146 * @ ppn          : physical page number
    138  * @ returns page base address
     147 * @ returns page virtual address.
    139148 ****************************************************************************************/
    140 inline void* ppm_ppn2base( ppn_t ppn );
     149inline void* ppm_ppn2vaddr( ppn_t ppn );
    141150
    142151/*****************************************************************************************
    143  * Get the PPN from the page base address.
     152 * Get the PPN from the page virtual address.
    144153 *****************************************************************************************
    145  * @ vaddr        : page base address
    146  * @ returns physical page number
     154 * @ vaddr        : page virtual address
     155 * @ returns physical page number.
    147156 ****************************************************************************************/
    148 inline ppn_t ppm_base2ppn( void * base );
     157inline ppn_t ppm_vaddr2ppn( void * base );
    149158
    150159/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r23 r50  
    936936    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
    937937
    938     if( local_cxy == GET_CXY( process->ref_xp) )   // calling process is reference process
     938    if( local_cxy == GET_CXY( process->ref_xp) ) // calling process is reference process
    939939    {
    940940        error = vmm_get_pte( process, vpn , &attr , &ppn );
    941941    }
    942     else                                           // use a RPC
     942    else                                         // calling process is not reference process
    943943    {
    944944        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
Note: See TracChangeset for help on using the changeset viewer.