/* * kcm.c - Kernel Cache Manager implementation. * * Author Alain Greiner (2016,2017,2018,2019,2020) * * Copyright (c) UPMC Sorbonne Universites * * This file is part of ALMOS-MKH. * * ALMOS-MKH is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2.0 of the License. * * ALMOS-MKH is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with ALMOS-MKH; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include /////////////////////////////////////////////////////////////////////////////////////////// // global variables /////////////////////////////////////////////////////////////////////////////////////////// extern chdev_directory_t chdev_dir; // allocated in kernel_init.c ///////////////////////////////////////////////////////////////////////////////////// // Local access functions ///////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// // This static function is called by the kcm_alloc() function. // It returns a pointer on a block allocated from an active kcm_page. // It makes a panic if no block is available in the selected page. // It changes the page status as required. ////////////////////////////////////////////////////////////////////////////////////// // @ kcm : pointer on KCM allocator. // @ kcm_page : pointer on an active kcm_page. // @ return pointer on allocated block. ///////////////////////////////////////////////////////////////////////////////////// static void * __attribute__((noinline)) kcm_get_block( kcm_t * kcm, kcm_page_t * kcm_page ) { // initialise variables uint32_t order = kcm->order; uint32_t count = kcm_page->count; uint64_t status = kcm_page->status; // check kcm page not full assert( __FUNCTION__, (count < 63) , "kcm_page should not be full / cxy %x / order %d / count %d", local_cxy, order, count ); uint32_t index = 1; uint64_t mask = (uint64_t)0x2; // allocate first free block in kcm_page, update status, // and count , compute index of allocated block in kcm_page while( index <= 63 ) { if( (status & mask) == 0 ) // block found { // update page count and status kcm_page->status = status | mask; kcm_page->count = count + 1; break; } index++; mask <<= 1; } // switch page to full if last block if( (count + 1) == 63 ) { list_unlink( &kcm_page->list); kcm->active_pages_nr--; list_add_first( &kcm->full_root , &kcm_page->list ); kcm->full_pages_nr ++; } // compute return pointer void * ptr = (void *)((intptr_t)kcm_page + (index << order)); return ptr; } // end kcm_get_block() ///////////////////////////////////////////////////////////////////////////////////// // This static function is called by the kcm_free() function. // It releases a previously allocated block to the relevant kcm_page. // It makes a panic if the released block is not allocated in this page. // It changes the kcm_page status as required. ///////////////////////////////////////////////////////////////////////////////////// // @ kcm : pointer on kcm allocator. // @ kcm_page : pointer on kcm_page. // @ block_ptr : pointer on block to be released. ///////////////////////////////////////////////////////////////////////////////////// static void __attribute__((noinline)) kcm_put_block ( kcm_t * kcm, kcm_page_t * kcm_page, void * block_ptr ) { // initialise variables uint32_t order = kcm->order; uint32_t count = kcm_page->count; uint64_t status = kcm_page->status; // compute block index from block pointer and kcm_page pointer uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; // compute mask in bit vector uint64_t mask = ((uint64_t)0x1) << index; if( (status & mask) == 0 ) { printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page ); kcm_remote_display( local_cxy , kcm ); return; } // update status & count in kcm_page kcm_page->status = status & ~mask; kcm_page->count = count - 1; // switch page to active if it was full if( count == 63 ) { list_unlink( &kcm_page->list ); kcm->full_pages_nr --; list_add_last( &kcm->active_root, &kcm_page->list ); kcm->active_pages_nr ++; } } // kcm_put_block() ///////////////////////////////////////////////////////////////////////////////////// // This static function returns one non-full kcm_page with the following policy : // - if the "active_list" is non empty, it returns the first "active" page, // without modifying the KCM state. // - if the "active_list" is empty, it allocates a new page from PPM, inserts // this page in the active_list, and returns it. ///////////////////////////////////////////////////////////////////////////////////// // @ kcm : local pointer on local KCM allocator. // @ return pointer on a non-full kcm page if success / returns NULL if no memory. ///////////////////////////////////////////////////////////////////////////////////// static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm ) { kcm_page_t * kcm_page; uint32_t active_pages_nr = kcm->active_pages_nr; if( active_pages_nr > 0 ) // return first active page { kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); } else // allocate a new page from PPM { // get KCM order uint32_t order = kcm->order; // get one kcm_page from PPM page_t * page = ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER ); if( page == NULL ) { #if DEBUG_KCM_ERROR printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", __FUNCTION__ , local_cxy ); #endif return NULL; } // get page base address xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); // get local pointer on kcm_page kcm_page = GET_PTR( base_xp ); // initialize kcm_page descriptor kcm_page->status = 0; kcm_page->count = 0; kcm_page->kcm = kcm; kcm_page->page = page; // introduce new page in KCM active_list list_add_first( &kcm->active_root , &kcm_page->list ); kcm->active_pages_nr ++; } return kcm_page; } // end kcm_get_page() ////////////////////////////// void kcm_init( kcm_t * kcm, uint32_t order) { // check argument assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), "order argument %d too large", order ); assert( __FUNCTION__, (order >= CONFIG_CACHE_LINE_ORDER), "order argument %d too small", order ); // initialize lock remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE ); // initialize KCM page lists kcm->full_pages_nr = 0; kcm->active_pages_nr = 0; list_root_init( &kcm->full_root ); list_root_init( &kcm->active_root ); // initialize order kcm->order = order; #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) ) printk("\n[%s] cxy %x / order %d\n", __FUNCTION__, local_cxy, order ); #endif } // end kcm_init() /////////////////////////////// void kcm_destroy( kcm_t * kcm ) { kcm_page_t * kcm_page; // build extended pointer on KCM lock xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); // get KCM lock remote_busylock_acquire( lock_xp ); // release all full pages while( list_is_empty( &kcm->full_root ) == false ) { kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list ); list_unlink( &kcm_page->list ); ppm_free_pages( kcm_page->page ); } // release all empty pages while( list_is_empty( &kcm->active_root ) == false ) { kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); list_unlink( &kcm_page->list ); ppm_free_pages( kcm_page->page ); } // release KCM lock remote_busylock_release( lock_xp ); } // end kcm_destroy() ////////////////////////////////// void * kcm_alloc( uint32_t order ) { kcm_t * kcm; kcm_page_t * kcm_page; void * block; // check argument assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), "order argument %d too large", order ); #if DEBUG_KCM uint32_t cycle = (uint32_t)hal_get_cycles(); #endif // smallest block size is a cache line if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; // get local pointer on relevant KCM allocator kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; // build extended pointer on local KCM lock xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); // get KCM lock remote_busylock_acquire( lock_xp ); // get a non-full kcm_page kcm_page = kcm_get_page( kcm ); if( kcm_page == NULL ) { remote_busylock_release( lock_xp ); return NULL; } #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); #endif // allocate a block from selected active page block = kcm_get_block( kcm , kcm_page ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); #endif // release lock remote_busylock_release( lock_xp ); return block; } // end kcm_alloc() /////////////////////////////// void kcm_free( void * block, uint32_t order ) { kcm_t * kcm; kcm_page_t * kcm_page; // check argument assert( __FUNCTION__, (block != NULL), "block pointer cannot be NULL" ); #if DEBUG_KCM uint32_t cycle = (uint32_t)hal_get_cycles(); #endif // smallest block size is a cache line if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; // get local pointer on relevant KCM allocator kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; // get local pointer on KCM page intptr_t kcm_page_mask = (1 << (order + 6)) - 1; kcm_page = (kcm_page_t *)((intptr_t)block & ~kcm_page_mask); // build extended pointer on local KCM lock xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); // get lock remote_busylock_acquire( lock_xp ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); #endif // release the block to the relevant page kcm_put_block( kcm , kcm_page , block ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); #endif // release lock remote_busylock_release( lock_xp ); } // end kcm_free() ///////////////////////////////////////////////////////////////////////////////////// // Remote access functions ///////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////// // This static function is called by the kcm_remote_alloc() function. // It can be called by any thread running in any cluster. // It returns a local pointer on a block allocated from an active kcm_page. // It makes a panic if no block available in the selected kcm_page. // It changes the page status as required. ///////////////////////////////////////////////////////////////////////////////////// // @ kcm_cxy : remote KCM cluster identifier. // @ kcm_ptr : local pointer on remote KCM allocator. // @ kcm_page : local pointer on remote active kcm_page to use. // @ return a local pointer on the allocated block. ///////////////////////////////////////////////////////////////////////////////////// static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t kcm_cxy, kcm_t * kcm_ptr, kcm_page_t * kcm_page ) { uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); // check kcm_page not full assert( __FUNCTION__, (count < 63) , "kcm_page should not be full / cxy %x / order %d / count %d", kcm_cxy, order, count ); uint32_t index = 1; uint64_t mask = (uint64_t)0x2; // allocate first free block in kcm_page, update status, // and count , compute index of allocated block in kcm_page while( index <= 63 ) { if( (status & mask) == 0 ) // block found { hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask ); hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); break; } index++; mask <<= 1; } // swich the page to full if last block if( (count + 1) == 63 ) { list_remote_unlink( kcm_cxy , &kcm_page->list ); hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 ); list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list ); hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 ); } // compute return pointer void * ptr = (void *)((intptr_t)kcm_page + (index << order)); return ptr; } // end kcm_remote_get_block() ///////////////////////////////////////////////////////////////////////////////////// // This static function is called by the kcm_remote_free() function. // It can be called by any thread running in any cluster. // It releases a previously allocated block to the relevant kcm_page. // It changes the kcm_page status as required. ///////////////////////////////////////////////////////////////////////////////////// // @ kcm_cxy : remote KCM cluster identifier // @ kcm_ptr : local pointer on remote KCM. // @ kcm_page : local pointer on kcm_page. // @ block_ptr : pointer on block to be released. ///////////////////////////////////////////////////////////////////////////////////// static void __attribute__((noinline)) kcm_remote_put_block ( cxy_t kcm_cxy, kcm_t * kcm_ptr, kcm_page_t * kcm_page, void * block_ptr ) { uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); // compute block index from block pointer and kcm_page pointer uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; // compute mask in bit vector uint64_t mask = ((uint64_t)0x1) << index; if( (status & mask) == 0 ) { printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page ); kcm_remote_display( kcm_cxy , kcm_ptr ); return; } // update status & count in kcm_page hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status & ~mask ); hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count - 1 ); // switch the page to active if page was full if( count == 63 ) { list_remote_unlink( kcm_cxy , &kcm_page->list ); hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , -1 ); list_remote_add_last( kcm_cxy , &kcm_ptr->active_root, &kcm_page->list ); hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); } } // end kcm_remote_put_block() ///////////////////////////////////////////////////////////////////////////////////// // This static function can be called by any thread running in any cluster. // It gets one non-full KCM page from the remote KCM. // It allocates a page from remote PPM to populate the freelist, and initialises // the kcm_page descriptor when required. ///////////////////////////////////////////////////////////////////////////////////// static kcm_page_t * __attribute__((noinline)) kcm_remote_get_page( cxy_t kcm_cxy, kcm_t * kcm_ptr ) { kcm_page_t * kcm_page; // local pointer on remote KCM page uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); if( active_pages_nr > 0 ) // return first active page { kcm_page = LIST_REMOTE_FIRST( kcm_cxy , &kcm_ptr->active_root , kcm_page_t , list ); } else // allocate a new page from PPM { // get KCM order uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order )); // get one kcm_page from PPM xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy, order + 6 - CONFIG_PPM_PAGE_ORDER ); if( page_xp == XPTR_NULL ) { #if DEBUG_KCM_ERROR printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", __FUNCTION__ , kcm_cxy ); #endif return NULL; } // get extended pointer on allocated buffer xptr_t base_xp = ppm_page2base( page_xp ); // get local pointer on kcm_page kcm_page = GET_PTR( base_xp ); // initialize kcm_page descriptor hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , 0 ); hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 ); hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm ) , kcm_ptr ); hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page ) , GET_PTR( page_xp ) ); // introduce new page in remote KCM active_list list_remote_add_first( kcm_cxy , &kcm_ptr->active_root , &kcm_page->list ); hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); } return kcm_page; } // end kcm_remote_get_page() ////////////////////////////////////////// void * kcm_remote_alloc( cxy_t kcm_cxy, uint32_t order ) { kcm_t * kcm_ptr; kcm_page_t * kcm_page; void * block_ptr; // check kcm_cxy argument assert( __FUNCTION__, cluster_is_active( kcm_cxy ), "cluster %x not active", kcm_cxy ); // check order argument assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) , "order argument %d too large", order ); // smallest size is a cache line if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER; // get local pointer on relevant KCM allocator (same in all clusters) kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6]; // build extended pointer on remote KCM lock xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); // get lock remote_busylock_acquire( lock_xp ); // get a non-full kcm_page kcm_page = kcm_remote_get_page( kcm_cxy , kcm_ptr ); if( kcm_page == NULL ) { remote_busylock_release( lock_xp ); return NULL; } #if DEBUG_KCM uint32_t cycle = (uint32_t)hal_get_cycles(); uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr )); uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr )); uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status )); uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count )); #endif #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); #endif // get a block from selected active page block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); #endif // release lock remote_busylock_release( lock_xp ); return block_ptr; } // end kcm_remote_alloc() //////////////////////////////////////// void kcm_remote_free( cxy_t kcm_cxy, void * block_ptr, uint32_t order ) { kcm_t * kcm_ptr; kcm_page_t * kcm_page; // check kcm_cxy argument assert( __FUNCTION__, cluster_is_active( kcm_cxy ), "cluster %x not active", kcm_cxy ); // check block_ptr argument assert( __FUNCTION__, (block_ptr != NULL), "block pointer cannot be NULL" ); // check order argument assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) , "order argument %d too large", order ); // smallest block size is a cache line if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; // get local pointer on relevant KCM allocator (same in all clusters) kcm_ptr = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; // get local pointer on KCM page intptr_t kcm_page_mask = (1 << (order + 6)) - 1; kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~kcm_page_mask); #if DEBUG_KCM uint32_t cycle = (uint32_t)hal_get_cycles(); uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr )); uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr )); uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status )); uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count )); #endif // build extended pointer on remote KCM lock xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); // get lock remote_busylock_acquire( lock_xp ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); #endif // release the block to the relevant page kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr ); #if DEBUG_KCM if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" " page %x / status [%x,%x] / count %d\n", __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); #endif // release lock remote_busylock_release( lock_xp ); } // end kcm_remote_free ///////////////////////////////////////// void kcm_remote_display( cxy_t kcm_cxy, kcm_t * kcm_ptr ) { list_entry_t * iter; kcm_page_t * kcm_page; uint64_t status; uint32_t count; // get pointers on TXT0 chdev xptr_t txt0_xp = chdev_dir.txt_tx[0]; cxy_t txt0_cxy = GET_CXY( txt0_xp ); chdev_t * txt0_ptr = GET_PTR( txt0_xp ); // get extended pointer on remote TXT0 chdev lock xptr_t txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); // get TXT0 lock remote_busylock_acquire( txt0_lock_xp ); uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) ); uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) ); uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); nolock_printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n", kcm_cxy, order, full_pages_nr, active_pages_nr ); if( active_pages_nr ) { LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->active_root , iter ) { kcm_page = LIST_ELEMENT( iter , kcm_page_t , list ); status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); nolock_printk("- active page %x / status (%x,%x) / count %d\n", kcm_page, (uint32_t)( status<< 32 ), (uint32_t)( status ), count ); } } if( full_pages_nr ) { LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->full_root , iter ) { kcm_page = LIST_ELEMENT( iter , kcm_page_t , list ); status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); nolock_printk("- full page %x / status (%x,%x) / count %d\n", kcm_page, (uint32_t)( status<< 32 ), (uint32_t)( status ), count ); } } // release TXT0 lock remote_busylock_release( txt0_lock_xp ); } // end kcm remote_display()