source: trunk/kernel/mm/ppm.c @ 606

Last change on this file since 606 was 606, checked in by alain, 5 years ago

Improve the FAT32 file system to support cat, rm, cp commands.

File size: 17.3 KB
RevLine 
[1]1/*
2 * ppm.c - Per-cluster Physical Pages Manager implementation
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
[567]5 *          Alain Greiner    (2016,2017,2018)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH.is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH.is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
[585]32#include <dqdt.h>
[567]33#include <busylock.h>
34#include <queuelock.h>
[1]35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
[567]39#include <mapper.h>
[1]40#include <ppm.h>
[606]41#include <vfs.h>
[1]42
[567]43////////////////////////////////////////////////////////////////////////////////////////
44//     functions to  translate [ page <-> base <-> ppn ]
45////////////////////////////////////////////////////////////////////////////////////////
46
[1]47////////////////////////////////////////////////
48inline bool_t ppm_page_is_valid( page_t * page )
49{
[160]50        ppm_t    * ppm  = &LOCAL_CLUSTER->ppm;
[1]51        uint32_t   pgnr = (uint32_t)( page - ppm->pages_tbl );
52        return (pgnr <= ppm->pages_nr);
53}
54
[50]55/////////////////////////////////////////////
[315]56inline xptr_t ppm_page2base( xptr_t page_xp )
[1]57{
[315]58        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
[1]59
[315]60    cxy_t    page_cxy = GET_CXY( page_xp );
[437]61    page_t * page_ptr = GET_PTR( page_xp );
[315]62
[406]63   void   * base_ptr = ppm->vaddr_base + 
64                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
65
[315]66        return XPTR( page_cxy , base_ptr );
67
68} // end ppm_page2base()
69
70/////////////////////////////////////////////
71inline xptr_t ppm_base2page( xptr_t base_xp )
[1]72{
[315]73        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
[1]74
[315]75    cxy_t    base_cxy = GET_CXY( base_xp );
[437]76    void   * base_ptr = GET_PTR( base_xp );
[315]77
78        page_t * page_ptr = ppm->pages_tbl + 
79                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
80
81        return XPTR( base_cxy , page_ptr );
82
83}  // end ppm_base2page()
84
85
86
[50]87///////////////////////////////////////////
[315]88inline ppn_t ppm_page2ppn( xptr_t page_xp )
89{
90        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
91
92    cxy_t    page_cxy = GET_CXY( page_xp );
[437]93    page_t * page_ptr = GET_PTR( page_xp );
[315]94
95    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
96
[437]97    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]98
99}  // end hal_page2ppn()
100
101///////////////////////////////////////
102inline xptr_t ppm_ppn2page( ppn_t ppn )
103{
[437]104        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
[315]105
[437]106    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]107
[437]108    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
109    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]110
[437]111    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
[315]112
113}  // end hal_ppn2page
114
115
116
117///////////////////////////////////////
118inline xptr_t ppm_ppn2base( ppn_t ppn )
119{
[437]120        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
[315]121   
[437]122    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]123
[437]124    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
125    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]126
[437]127        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
[315]128
129}  // end ppm_ppn2base()
130
131///////////////////////////////////////////
132inline ppn_t ppm_base2ppn( xptr_t base_xp )
133{
134        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
135
136    cxy_t    base_cxy = GET_CXY( base_xp );
[437]137    void   * base_ptr = GET_PTR( base_xp );
[315]138
139    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
140
[437]141    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]142
143}  // end ppm_base2ppn()
144
145
[567]146////////////////////////////////////////////////////////////////////////////////////////
147//     functions to  allocate / release  physical pages
148////////////////////////////////////////////////////////////////////////////////////////
[315]149
///////////////////////////////////////////
// Release one block of 2^order physical pages to the local buddy allocator.
// The caller must hold the PPM free_lock : this "nolock" variant does not
// take it, so it can be used by code that already owns the lock.
// It implements buddy coalescing : the released block is repeatedly merged
// with its free buddy of the same order, until the buddy is allocated, has
// a different order, or CONFIG_PPM_MAX_ORDER is reached; the merged block
// is then inserted in the free list matching its final order.
// @ page : pointer on the first page descriptor of the released block.
void ppm_free_pages_nolock( page_t * page )
{
	page_t   * buddy;            // searched buddy page descriptor
	uint32_t   buddy_index;      // buddy page index
	page_t   * current;          // current (merged) page descriptor
	uint32_t   current_index;    // current (merged) page index
	uint32_t   current_order;    // current (merged) page order

	ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
	page_t   * pages_tbl   = ppm->pages_tbl;

	// catch double-free and release of reserved pages
	assert( !page_is_flag( page , PG_FREE ) ,
    "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

	assert( !page_is_flag( page , PG_RESERVED ) ,
    "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

	// update released page descriptor flags
	page_set_flag( page , PG_FREE );

	// search the buddy page descriptor
	// - merge with current page descriptor if found
	// - exit to release the current page descriptor if not found
	current       = page ,
	current_index = (uint32_t)(page - ppm->pages_tbl);
	for( current_order = page->order ;
	     current_order < CONFIG_PPM_MAX_ORDER ;
	     current_order++ )
	{
		// the buddy of a block of order k is the block whose
		// index differs only in bit k
		buddy_index = current_index ^ (1 << current_order);
		buddy       = pages_tbl + buddy_index;

		// stop merging when the buddy is allocated or has another order
		if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;

		// remove buddy from free list
		list_unlink( &buddy->list );
		ppm->free_pages_nr[current_order] --;

		// merge buddy with current : the merged block starts at the
		// lower of the two indexes (bit current_order cleared)
		buddy->order = 0;
		current_index &= buddy_index;
	}

	// update merged page descriptor order
	current        = pages_tbl + current_index;
	current->order = current_order;

	// insert current in free list
	list_add_first( &ppm->free_pages_root[current_order] , &current->list );
	ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()
203
[1]204////////////////////////////////////////////
205page_t * ppm_alloc_pages( uint32_t   order )
206{
[160]207        uint32_t   current_order;
[1]208        page_t   * remaining_block;
209        uint32_t   current_size;
[551]210
[438]211#if DEBUG_PPM_ALLOC_PAGES
[433]212uint32_t cycle = (uint32_t)hal_get_cycles();
[438]213if( DEBUG_PPM_ALLOC_PAGES < cycle )
[567]214printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
215__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
[433]216#endif
[1]217
[438]218#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
219if( DEBUG_PPM_ALLOC_PAGES < cycle )
[433]220ppm_print();
221#endif
222
[160]223        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
[1]224
[492]225        assert( (order < CONFIG_PPM_MAX_ORDER) ,
[407]226    "illegal order argument = %x\n" , order );
[1]227
[406]228        page_t * block = NULL; 
[1]229
[160]230        // take lock protecting free lists
[567]231        busylock_acquire( &ppm->free_lock );
[1]232
[160]233        // find a free block equal or larger to requested size
[1]234        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
235        {
236                if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
237                {
238                        block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
239                        list_unlink( &block->list );
240                        break;
241                }
242        }
243
244        if( block == NULL ) // return failure
245        {
[160]246                // release lock protecting free lists
[567]247                busylock_release( &ppm->free_lock );
[1]248
[438]249#if DEBUG_PPM_ALLOC_PAGES
[433]250cycle = (uint32_t)hal_get_cycles();
[438]251if( DEBUG_PPM_ALLOC_PAGES < cycle )
[567]252printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
253__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
[433]254#endif
255
[160]256                return NULL;
257        }
[18]258
[160]259        // update free-lists after removing a block
[18]260        ppm->free_pages_nr[current_order] --;
[1]261        current_size = (1 << current_order);
262
[160]263        // split the removed block in smaller sub-blocks if required
264        // and update the free-lists accordingly
[1]265        while( current_order > order )
266        {
267                current_order --;
268                current_size >>= 1;
[18]269
[1]270                remaining_block = block + current_size;
271                remaining_block->order = current_order;
272
273                list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
274                ppm->free_pages_nr[current_order] ++;
275        }
[18]276
[160]277        // update page descriptor
278        page_clear_flag( block , PG_FREE );
[1]279        page_refcount_up( block );
280        block->order = order;
281
[160]282        // release lock protecting free lists
[567]283        busylock_release( &ppm->free_lock );
[18]284
[585]285    // update DQDT
286    dqdt_increment_pages( order );
287
[438]288#if DEBUG_PPM_ALLOC_PAGES
[433]289cycle = (uint32_t)hal_get_cycles();
[438]290if( DEBUG_PPM_ALLOC_PAGES < cycle )
[567]291printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
292__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 
2931<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
[433]294#endif
[7]295
[1]296        return block;
297
[433]298}  // end ppm_alloc_pages()
[1]299
[433]300
[1]301////////////////////////////////////
302void ppm_free_pages( page_t * page )
303{
304        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[18]305
[438]306#if DEBUG_PPM_FREE_PAGES
[433]307uint32_t cycle = (uint32_t)hal_get_cycles();
[438]308if( DEBUG_PPM_FREE_PAGES < cycle )
[567]309printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
310__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 
3111<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
[433]312#endif
313
[438]314#if(DEBUG_PPM_FREE_PAGES & 0x1)
315if( DEBUG_PPM_FREE_PAGES < cycle )
[433]316ppm_print();
317#endif
318
[160]319        // get lock protecting free_pages[] array
[567]320        busylock_acquire( &ppm->free_lock );
[1]321
[18]322        ppm_free_pages_nolock( page );
[1]323
[160]324        // release lock protecting free_pages[] array
[567]325        busylock_release( &ppm->free_lock );
[433]326
[585]327    // update DQDT
328    dqdt_decrement_pages( page->order );
329
[438]330#if DEBUG_PPM_FREE_PAGES
[433]331cycle = (uint32_t)hal_get_cycles();
[438]332if( DEBUG_PPM_FREE_PAGES < cycle )
[567]333printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
334__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 
3351<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
[433]336#endif
337
[567]338}  // end ppm_free_pages()
[1]339
[567]340//////////////////////
[486]341void ppm_print( void )
[1]342{
343        uint32_t       order;
344        list_entry_t * iter;
345        page_t       * page;
346
[433]347    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
348
[160]349        // get lock protecting free lists
[567]350        busylock_acquire( &ppm->free_lock );
[1]351
[433]352        printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
[18]353
[1]354        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
355        {
[433]356                printk("- order = %d / free_pages = %d\t: ",
[160]357                       order , ppm->free_pages_nr[order] );
[18]358
[1]359                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
360                {
361                        page = LIST_ELEMENT( iter , page_t , list );
[433]362                        printk("%x," , page - ppm->pages_tbl );
[1]363                }
[18]364
[433]365                printk("\n");
[1]366        }
367
[160]368        // release lock protecting free lists
[567]369        busylock_release( &ppm->free_lock );
[160]370}
[1]371
[53]372///////////////////////////////////////
373error_t ppm_assert_order( ppm_t * ppm )
[1]374{
375        uint32_t       order;
376        list_entry_t * iter;
377        page_t       * page;
[18]378
[407]379        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
[1]380        {
381                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
[18]382
[1]383                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
384                {
385                        page = LIST_ELEMENT( iter , page_t , list );
[160]386                        if( page->order != order )  return -1;
[1]387                }
388        }
389
[160]390        return 0;
391}
[53]392
[567]393
394//////////////////////////////////////////////////////////////////////////////////////
395//     functions to handle  dirty physical pages
396//////////////////////////////////////////////////////////////////////////////////////
397
//////////////////////////////////////////
// Set the PG_DIRTY flag in a (possibly remote) page descriptor, and
// register the page in the dirty list of the PPM of the page's cluster.
// @ page_xp : extended pointer on the page descriptor.
// @ return true if the page was actually marked dirty by this call /
//          false if the page was already dirty.
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
	bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
	ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
           
	// lock the remote PPM dirty_list
	// (taken before the page lock : same order as ppm_page_undo_dirty)
	remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

	if( (flags & PG_DIRTY) == 0 )   // page not yet dirty
	{
		// set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

		// The PPM dirty list is a LOCAL list !!!
        // We must update 4 pointers to insert a new page in this list.
        // We can use the standard LIST API when the page is local,
        // but we cannot use the standard API if the page is remote...

        if( page_cxy == local_cxy )         // locally update the PPM dirty list
        {
            list_add_first( &ppm->dirty_root , &page_ptr->list );
        } 
        else                                // remotely update the PPM dirty list
        {
            // get local and remote pointers on "root" list entry
            list_entry_t * root    = &ppm->dirty_root;
            xptr_t         root_xp = XPTR( page_cxy , root );

            // get local and remote pointers on "page" list entry
            list_entry_t * list    = &page_ptr->list;
            xptr_t         list_xp = XPTR( page_cxy , list );

            // get local and remote pointers on first dirty page
            list_entry_t * dirt    = hal_remote_lpt( XPTR( page_cxy, &root->next ) );
            xptr_t         dirt_xp = XPTR( page_cxy , dirt );

            // set root.next, list.next, list.pred, curr.pred in remote cluster
            // NOTE(review): the "+ sizeof(intptr_t)" addressing assumes the
            // list_entry_t layout is { next ; pred } — verify against list.h
            hal_remote_spt( root_xp                    , list );
            hal_remote_spt( list_xp                    , dirt );
            hal_remote_spt( list_xp + sizeof(intptr_t) , root );
            hal_remote_spt( dirt_xp + sizeof(intptr_t) , list );
        }

		done = true;
	}

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

	// unlock the remote PPM dirty_list
	remote_queuelock_release( dirty_lock_xp );

	return done;

} // end ppm_page_do_dirty()
471
472////////////////////////////////////////////
473bool_t ppm_page_undo_dirty( xptr_t page_xp )
[567]474{
475        bool_t done = false;
476
[606]477    // get page cluster and local pointer
478    page_t * page_ptr = GET_PTR( page_xp );
479    cxy_t    page_cxy = GET_CXY( page_xp );
480
481    // get local pointer on PPM (same in all clusters)
[567]482        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
483
[606]484    // build extended pointers on page lock, page flags, and PPM dirty list lock
485    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
486    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
487    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
488           
489        // lock the remote PPM dirty_list
490        remote_queuelock_acquire( XPTR( page_cxy , &ppm->dirty_lock ) );
[567]491
[606]492    // lock the remote page
493    remote_busylock_acquire( page_lock_xp );
494
495    // get remote page flags
496    uint32_t flags = hal_remote_l32( page_flags_xp );
497
498        if( (flags & PG_DIRTY) )  // page is dirty
[567]499        {
[606]500                // reset dirty flag in page descriptor
501        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
[567]502
[606]503                // The PPM dirty list is a LOCAL list !!!
504        // We must update 4 pointers to remove a page from this list.
505        // we can use the standard LIST API when the page is local,
506        // but we cannot use the standard API if the page is remote...
507
508        if( page_cxy == local_cxy )         // locally update the PPM dirty list
509        {
510            list_unlink( &page_ptr->list );
511        } 
512        else                                // remotely update the PPM dirty list
513        {
514            // get local and remote pointers on "page" list entry
515            list_entry_t * list    = &page_ptr->list;
516            xptr_t         list_xp = XPTR( page_cxy , list );
517
518            // get local and remote pointers on "next" page list entry
519            list_entry_t * next    = hal_remote_lpt( list_xp );
520            xptr_t         next_xp = XPTR( page_cxy , next );
521
522            // get local and remote pointers on "pred" page list entry
523            list_entry_t * pred    = hal_remote_lpt( list_xp + sizeof(intptr_t) );
524            xptr_t         pred_xp = XPTR( page_cxy , pred );
525
526            // set root.next, list.next, list pred, curr.pred in remote cluster
527            hal_remote_spt( pred_xp                    , next );
528            hal_remote_spt( list_xp                    , NULL );
529            hal_remote_spt( list_xp + sizeof(intptr_t) , NULL );
530            hal_remote_spt( next_xp + sizeof(intptr_t) , pred );
531        }
532
[567]533                done = true;
534        }
535
[606]536    // unlock the remote page
537    remote_busylock_release( page_lock_xp );
[567]538
[606]539        // unlock the remote PPM dirty_list
540        remote_queuelock_release( dirty_lock_xp );
541
[567]542        return done;
543
[606]544}  // end ppm_page_undo_dirty()
545
/////////////////////////////////
// Synchronize all dirty pages registered in the local PPM dirty list :
// each page is written back to the IOC device through the VFS, while
// holding the PPM dirty_lock and the per-page lock.
void ppm_sync_dirty_pages( void )
{
	ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

	// get the PPM dirty_list lock
	remote_queuelock_acquire( dirty_lock_xp );

	// NOTE(review): this loop terminates only if vfs_fs_move_page()
	// removes the page from the dirty list; since the dirty_lock is held
	// for the whole loop, that removal path must not re-acquire it
	// (e.g. through ppm_page_undo_dirty) or it would self-deadlock —
	// verify against the vfs_fs_move_page() implementation.
	while( !list_is_empty( &ppm->dirty_root ) )
	{
		page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

		// get the page lock
		remote_busylock_acquire( page_lock_xp );

		// sync the page
		vfs_fs_move_page( page_xp , false );  // from mapper to device

		// release the page lock
		remote_busylock_release( page_lock_xp );
	}

	// release the PPM dirty_list lock
	remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()
582
Note: See TracBrowser for help on using the repository browser.