/*
 * page.c - physical page related operations implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_atomic.h>
#include <spinlock.h>
#include <list.h>
#include <xlist.h>
#include <memcpy.h>
#include <thread.h>
#include <scheduler.h>
#include <cluster.h>
#include <ppm.h>
#include <mapper.h>
#include <printk.h>
#include <vfs.h>
#include <page.h>

////////////////////////////////////////
inline void page_init( page_t * page )
{
    page->flags    = 0;
    page->order    = 0;
    page->mapper   = NULL;
    page->index    = 0;
    page->fork_nr  = 0;
    page->refcount = 0;

    spinlock_init( &page->lock );
    list_entry_init( &page->list );
    xlist_root_init( XPTR( local_cxy , &page->wait_root ) );
}

////////////////////////////////////////////
inline void page_set_flag( page_t * page,
                           uint32_t value )
{
    hal_atomic_or( (uint32_t *)&page->flags , (uint32_t)value );
}

//////////////////////////////////////////////
inline void page_clear_flag( page_t * page,
                             uint32_t value )
{
    hal_atomic_and( (uint32_t *)&page->flags , ~((uint32_t)value) );
}

//////////////////////////////////////////////
inline bool_t page_is_flag( page_t * page,
                            uint32_t value )
{
    return ( (page->flags & value) ? 1 : 0 );
}
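/////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): the three flag
// accessors above are typically combined into a "mark if not yet marked"
// pattern, as page_do_dirty() does below. The PG_DIRTY name comes from
// page.h; the 'page' pointer is assumed to reference a local, already
// initialized page descriptor.
//
//      if( !page_is_flag( page , PG_DIRTY ) )   // test the flag
//      {
//          page_set_flag( page , PG_DIRTY );    // atomically set it
//      }
//
// Note that the test and the set are two separate atomic operations, so
// callers such as page_do_dirty() serialize the sequence with an external
// lock.
/////////////////////////////////////////////////////////////////////////////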
//////////////////////////////////////
bool_t page_do_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    spinlock_lock( &ppm->dirty_lock );

    if( !page_is_flag( page , PG_DIRTY ) )
    {
        // set dirty flag in page descriptor
        page_set_flag( page , PG_DIRTY );

        // register page in PPM dirty list
        list_add_first( &ppm->dirty_root , &page->list );
        done = true;
    }

    // unlock the PPM dirty_list
    spinlock_unlock( &ppm->dirty_lock );

    return done;
}

////////////////////////////////////////
bool_t page_undo_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    spinlock_lock( &ppm->dirty_lock );

    if( page_is_flag( page , PG_DIRTY ) )
    {
        // clear dirty flag in page descriptor
        page_clear_flag( page , PG_DIRTY );

        // remove page from PPM dirty list
        list_unlink( &page->list );
        done = true;
    }

    // unlock the PPM dirty_list
    spinlock_unlock( &ppm->dirty_lock );

    return done;
}

/////////////////////
void sync_all_pages()
{
    page_t * page;
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    spinlock_lock( &ppm->dirty_lock );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page = LIST_FIRST( &ppm->dirty_root , page_t , list );

        // unlock the PPM dirty_list
        spinlock_unlock( &ppm->dirty_lock );

        // lock the page
        page_lock( page );

        // sync the page
        vfs_mapper_move_page( page , false );   // from mapper

        // unlock the page
        page_unlock( page );

        // lock the PPM dirty_list
        spinlock_lock( &ppm->dirty_lock );
    }

    // unlock the PPM dirty_list
    spinlock_unlock( &ppm->dirty_lock );
}

///////////////////////////////
void page_lock( page_t * page )
{
    // take the spinlock protecting the PG_LOCKED flag
    spinlock_lock( &page->lock );

    if( page_is_flag( page , PG_LOCKED ) )  // page is already locked
    {
        // get pointer on calling thread
        thread_t * thread = CURRENT_THREAD;

        // register thread in the page waiting queue
        xlist_add_last( XPTR( local_cxy , &page->wait_root ),
                        XPTR( local_cxy , &thread->wait_list ) );

        // release the spinlock
        spinlock_unlock( &page->lock );

        // deschedule the calling thread
        thread_block( thread , THREAD_BLOCKED_PAGE );
        sched_yield();
    }
    else                                    // page is not locked
    {
        // set the PG_LOCKED flag
        page_set_flag( page , PG_LOCKED );

        // release the spinlock
        spinlock_unlock( &page->lock );
    }
}

/////////////////////////////////
void page_unlock( page_t * page )
{
    // take the spinlock protecting the PG_LOCKED flag
    spinlock_lock( &page->lock );

    // check the page waiting list
    bool_t is_empty = xlist_is_empty( XPTR( local_cxy , &page->wait_root ) );

    if( is_empty == false )   // at least one waiting thread => resume it
    {
        // get an extended pointer on the first waiting thread
        xptr_t root_xp   = XPTR( local_cxy , &page->wait_root );
        xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );

        // reactivate the first waiting thread
        // (PG_LOCKED is left set: ownership is transferred to this thread)
        thread_unblock( thread_xp , THREAD_BLOCKED_PAGE );
    }
    else                      // no waiting thread => clear the PG_LOCKED flag
    {
        page_clear_flag( page , PG_LOCKED );
    }

    // release the spinlock
    spinlock_unlock( &page->lock );
}

////////////////////////////////////////////
inline void page_refcount_up( page_t *page )
{
    hal_atomic_add( &page->refcount , +1 );
}

//////////////////////////////////////////////
inline void page_refcount_down( page_t *page )
{
    hal_atomic_add( &page->refcount , -1 );
}

//////////////////////////////
void page_copy( page_t * dst,
                page_t * src )
{
    uint32_t size;
    void   * src_base;
    void   * dst_base;

    assert( (dst->order == src->order) , __FUNCTION__ , "src size != dst size\n" );

    size = (1 << dst->order) * CONFIG_PPM_PAGE_SIZE;

    src_base = ppm_page2vaddr( src );
    dst_base = ppm_page2vaddr( dst );

    memcpy( dst_base , src_base , size );
}
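/////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): a typical client
// sequence to modify a page and register it for a later sync_all_pages().
// The 'page' pointer and the 'buffer' / 'count' variables are hypothetical;
// ppm_page2vaddr() is the PPM helper used elsewhere in this file.
//
//      page_lock( page );                    // get exclusive access,
//                                            // blocking if already locked
//      memcpy( ppm_page2vaddr( page ) , buffer , count );
//      page_do_dirty( page );                // link into the PPM dirty list
//      page_unlock( page );                  // resume one waiting thread,
//                                            // if any
/////////////////////////////////////////////////////////////////////////////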
///////////////////////////////
void page_zero( page_t * page )
{
    uint32_t size;
    void   * base;

    size = (1 << page->order) * CONFIG_PPM_PAGE_SIZE;

    base = ppm_page2vaddr( page );

    memset( base , 0 , size );
}

////////////////////////////////
void page_print( page_t * page )
{
    printk("*** Page %d : base = %x / flags = %x / order = %d / count = %d\n",
           page->index,
           ppm_page2vaddr( page ),
           page->flags,
           page->order,
           page->refcount );
}
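/////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): page_zero() and
// page_print() as they might be used right after allocating a physical
// page. The ppm_alloc_pages() allocator is assumed to be declared in
// ppm.h; check its actual prototype before relying on this sketch.
//
//      page_t * page = ppm_alloc_pages( 0 );  // one single page (order 0)
//      if( page != NULL )
//      {
//          page_zero( page );                 // scrub the page content
//          page_print( page );                // debug dump of the descriptor
//      }
/////////////////////////////////////////////////////////////////////////////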