/*
 * kcm.c - Per cluster & per type Kernel Cache Manager access functions
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Alain Greiner    (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <list.h>
#include <printk.h>
#include <bitmap.h>
#include <spinlock.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <kmem.h>
#include <kcm.h>

//////////////////////////////////////////////////////////////////////////////////////
// This static function returns a pointer on an allocated block from an active page.
// It returns NULL if no block is available in the selected page.
// It changes the page status if required.
//////////////////////////////////////////////////////////////////////////////////////
// @ kcm      : pointer on kcm allocator.
// @ kcm_page : pointer on active kcm page to use.
//////////////////////////////////////////////////////////////////////////////////////
static void * kcm_get_block( kcm_t      * kcm,
                             kcm_page_t * kcm_page )
{
    kcm_dmsg("\n[INFO] %s : enters for %s / page %x / count = %d / active = %d\n",
             __FUNCTION__ , kmem_type_str( kcm->type ) ,
             (intptr_t)kcm_page , kcm_page->count , kcm_page->active );

    assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );

    // get first block available
    int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );

    assert( (index != -1) , __FUNCTION__ , "kcm_page should not be full" );

    // allocate block
    bitmap_clear( kcm_page->bitmap , index );

    // increase kcm_page count
    kcm_page->count ++;

    // change the kcm_page to busy if no more free block in page
    if( kcm_page->count >= kcm->blocks_nr )
    {
        kcm_page->active = 0;
        list_unlink( &kcm_page->list );
        kcm->active_pages_nr --;

        list_add_first( &kcm->busy_root , &kcm_page->list );
        kcm->busy_pages_nr ++;
        kcm_page->busy = 1;
    }

    // compute return pointer
    void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE
                          + (index * kcm->block_size) );

    kcm_dmsg("\n[INFO] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n",
             __FUNCTION__ , kmem_type_str( kcm->type ) , ptr ,
             (intptr_t)kcm_page , kcm_page->count );

    return ptr;
}
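//////////////////////////////////////////////////////////////////////////////////////
// Illustration (not used by the code) : layout of a KCM page, as assumed by the
// kcm_get_block() and kcm_put_block() functions. The kcm_page_t descriptor occupies
// the first KCM slot of the physical page, and the blocks follow it :
//
//   | kcm_page_t (one CONFIG_KCM_SLOT_SIZE slot) | block 0 | block 1 | ... | block N-1 |
//
// so the address of block <index> is
//   (uint8_t *)kcm_page + CONFIG_KCM_SLOT_SIZE + (index * kcm->block_size)
// For instance, with the purely illustrative values CONFIG_KCM_SLOT_SIZE = 64 and
// block_size = 192, block 3 starts at byte offset 64 + 3*192 = 640 from the page base.
//////////////////////////////////////////////////////////////////////////////////////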
//////////////////////////////////////////////////////////////////////////////////////
// This static function releases a previously allocated block.
// It changes the kcm_page status if required.
//////////////////////////////////////////////////////////////////////////////////////
// @ kcm      : pointer on kcm allocator.
// @ kcm_page : pointer on kcm_page.
// @ ptr      : pointer on block to be released.
//////////////////////////////////////////////////////////////////////////////////////
static void kcm_put_block ( kcm_t      * kcm,
                            kcm_page_t * kcm_page,
                            void       * ptr )
{
    uint32_t index;

    // compute block index from block pointer
    index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;

    assert( !bitmap_state( kcm_page->bitmap , index ) , __FUNCTION__ ,
            "block already released" );

    assert( (kcm_page->count > 0) , __FUNCTION__ , "count already zero" );

    bitmap_set( kcm_page->bitmap , index );
    kcm_page->count --;

    // change the page to active if it was busy
    if( kcm_page->busy )
    {
        kcm_page->busy = 0;
        list_unlink( &kcm_page->list );
        kcm->busy_pages_nr --;

        list_add_last( &kcm->active_root , &kcm_page->list );
        kcm->active_pages_nr ++;
        kcm_page->active = 1;
    }

    // change the kcm_page to free if last allocated block in active page
    if( (kcm_page->active) && (kcm_page->count == 0) )
    {
        kcm_page->active = 0;
        list_unlink( &kcm_page->list );
        kcm->active_pages_nr --;

        list_add_first( &kcm->free_root , &kcm_page->list );
        kcm->free_pages_nr ++;
    }
}

//////////////////////////////////////////////////////////////////////////////////////
// This static function allocates one page from PPM. It initializes
// the kcm_page descriptor, and introduces the new kcm_page into the freelist.
//////////////////////////////////////////////////////////////////////////////////////
static error_t freelist_populate( kcm_t * kcm )
{
    page_t     * page;
    kcm_page_t * kcm_page;
    kmem_req_t   req;

    // get one page from local PPM
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_KERNEL;
    page      = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

    // get page base address
    xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
    kcm_page = (kcm_page_t *)GET_PTR( base_xp );

    // initialize KCM-page descriptor
    bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr );
    kcm_page->busy   = 0;
    kcm_page->active = 0;
    kcm_page->count  = 0;
    kcm_page->kcm    = kcm;
    kcm_page->page   = page;

    // introduce new page in free-list
    list_add_first( &kcm->free_root , &kcm_page->list );
    kcm->free_pages_nr ++;

    return 0;
}
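//////////////////////////////////////////////////////////////////////////////////////
// Summary (for the reader, not used by the code) : a KCM page is always registered
// in exactly one of the three page lists, and moves between them as follows :
//   free   -> active : kcm_alloc()     selects a page from the freelist
//   active -> busy   : kcm_get_block() allocates the last free block of the page
//   busy   -> active : kcm_put_block() releases one block of a full page
//   active -> free   : kcm_put_block() releases the last allocated block of the page
//////////////////////////////////////////////////////////////////////////////////////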
//////////////////////////////////////////////////////////////////////////////////////
// This private function gets one KCM page from the KCM freelist.
// It populates the freelist if required.
//////////////////////////////////////////////////////////////////////////////////////
static kcm_page_t * freelist_get( kcm_t * kcm )
{
    error_t      error;
    kcm_page_t * kcm_page;

    // get a new page from PPM if freelist empty
    if( kcm->free_pages_nr == 0 )
    {
        error = freelist_populate( kcm );
        if( error ) return NULL;
    }

    // get first KCM page from freelist and unlink it
    kcm_page = LIST_FIRST( &kcm->free_root , kcm_page_t , list );
    list_unlink( &kcm_page->list );
    kcm->free_pages_nr --;

    return kcm_page;
}

//////////////////////////////
void kcm_init( kcm_t    * kcm,
               uint32_t   type )
{
    // the kcm_page descriptor must fit in the KCM slot
    assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , __FUNCTION__ ,
            "KCM slot too small\n" );

    // initialize lock
    spinlock_init( &kcm->lock );

    // initialize KCM type
    kcm->type = type;

    // initialize KCM page lists
    kcm->free_pages_nr   = 0;
    kcm->busy_pages_nr   = 0;
    kcm->active_pages_nr = 0;
    list_root_init( &kcm->free_root );
    list_root_init( &kcm->busy_root );
    list_root_init( &kcm->active_root );

    // initialize block size
    uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE );
    kcm->block_size = block_size;

    // initialize number of blocks per page
    uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
    kcm->blocks_nr = blocks_nr;

    kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
             __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );
}
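//////////////////////////////////////////////////////////////////////////////////////
// Sizing example (values chosen only for illustration) : assuming
// CONFIG_PPM_PAGE_SIZE = 4096 and CONFIG_KCM_SLOT_SIZE = 64, a kmem type of
// 100 bytes gives block_size = ARROUND_UP( 100 , 64 ) = 128 bytes, and
// blocks_nr = (4096 - 64) / 128 = 31 blocks per KCM page (integer division),
// the first slot being reserved for the kcm_page_t descriptor.
//////////////////////////////////////////////////////////////////////////////////////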
///////////////////////////////
void kcm_destroy( kcm_t * kcm )
{
    kcm_page_t * kcm_page;

    // get KCM lock
    spinlock_lock( &kcm->lock );

    // release all free pages
    while( !list_is_empty( &kcm->free_root ) )
    {
        kcm_page = LIST_FIRST( &kcm->free_root , kcm_page_t , list );
        list_unlink( &kcm_page->list );
        kcm->free_pages_nr --;
        ppm_free_pages( kcm_page->page );
    }

    // release all active pages
    while( !list_is_empty( &kcm->active_root ) )
    {
        kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
        list_unlink( &kcm_page->list );
        kcm->active_pages_nr --;
        ppm_free_pages( kcm_page->page );
    }

    // release all busy pages
    while( !list_is_empty( &kcm->busy_root ) )
    {
        kcm_page = LIST_FIRST( &kcm->busy_root , kcm_page_t , list );
        list_unlink( &kcm_page->list );
        kcm->busy_pages_nr --;
        ppm_free_pages( kcm_page->page );
    }

    // release KCM lock
    spinlock_unlock( &kcm->lock );
}

///////////////////////////////
void * kcm_alloc( kcm_t * kcm )
{
    kcm_page_t * kcm_page;
    void       * ptr = NULL;   // pointer on block

    // get lock
    spinlock_lock( &kcm->lock );

    // get an active page
    if( list_is_empty( &kcm->active_root ) )   // no active page => get one
    {
        // get a page from free list
        kcm_page = freelist_get( kcm );

        if( kcm_page == NULL )
        {
            spinlock_unlock( &kcm->lock );
            return NULL;
        }

        // insert page in active list
        list_add_first( &kcm->active_root , &kcm_page->list );
        kcm->active_pages_nr ++;
        kcm_page->active = 1;

        kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
                 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
                 (intptr_t)kcm_page , kcm_page->count );
    }
    else                                       // get first page from active list
    {
        // get page pointer from active list
        kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );

        kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
                 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
                 (intptr_t)kcm_page , kcm_page->count );
    }

    // get a block from selected active page
    // cannot fail, as an active page cannot be full...
    ptr = kcm_get_block( kcm , kcm_page );

    // release lock
    spinlock_unlock( &kcm->lock );

    return ptr;
}

///////////////////////////
void kcm_free( void * ptr )
{
    kcm_page_t * kcm_page;
    kcm_t      * kcm;

    assert( (ptr != NULL) , __FUNCTION__ , "pointer cannot be NULL" );

    // get the KCM page and the owner KCM allocator from the block pointer
    kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
    kcm      = kcm_page->kcm;

    // get lock
    spinlock_lock( &kcm->lock );

    // release block
    kcm_put_block( kcm , kcm_page , ptr );

    // release lock
    spinlock_unlock( &kcm->lock );
}

////////////////////////////
void kcm_print( kcm_t * kcm )
{
    printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",
           kmem_type_str( kcm->type ) , kcm->free_pages_nr ,
           kcm->busy_pages_nr , kcm->active_pages_nr );
}
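//////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of this file) : a per-cluster, per-type
// KCM allocator is typically reached through the generic kmem interface, but the
// functions above can also be called directly. The instance name and the KMEM_SEM
// type used below are assumptions made for the example :
//
//   kcm_t  sem_kcm;
//   kcm_init( &sem_kcm , KMEM_SEM );      // one allocator for the assumed KMEM_SEM type
//   void * sem = kcm_alloc( &sem_kcm );   // get one block (NULL if out of memory)
//   if( sem != NULL ) kcm_free( sem );    // the owner KCM is found from the pointer
//////////////////////////////////////////////////////////////////////////////////////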