source: trunk/kernel/mm/ppm.c @ 632

Last change on this file was revision 632, checked in by alain, 5 years ago

This version replaces the RPCs by direct remote memory accesses
for physical page allocation/release.
It was committed before being tested.

File size: 24.3 KB
/*
 * ppm.c - Per-cluster Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <printk.h>
#include <list.h>
#include <bits.h>
#include <page.h>
#include <dqdt.h>
#include <busylock.h>
#include <queuelock.h>
#include <thread.h>
#include <cluster.h>
#include <kmem.h>
#include <process.h>
#include <mapper.h>
#include <ppm.h>
#include <vfs.h>

////////////////////////////////////////////////////////////////////////////////////////
//     functions to translate [ page <-> base <-> ppn ]
////////////////////////////////////////////////////////////////////////////////////////

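// The translations below are pure arithmetic on the local pages_tbl[] and vaddr_base
// fields of the PPM descriptor. Illustrative example (assumed values, not taken from a
// real configuration): with CONFIG_PPM_PAGE_SHIFT == 12 (4 Kbytes pages), the page
// descriptor pages_tbl[5] maps the physical page whose local base address is
// vaddr_base + (5 << 12) = vaddr_base + 0x5000, and ppm_base2page() inverts this
// computation by shifting the base offset right by 12 bits.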

/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base +
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

    return XPTR( page_cxy , base_ptr );

}  // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    page_t * page_ptr = ppm->pages_tbl +
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);

    return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()

///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );

}  // end ppm_ppn2page()

///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_base2ppn()

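// Note on the PPN encoding: the PADDR() macro builds a full physical address from a
// cluster identifier and a cluster-local offset, and CXY_FROM_PADDR() / LPA_FROM_PADDR()
// perform the reverse extraction. Illustrative sketch (assumed layout, not a real
// configuration): with 4 Kbytes pages, if cluster 0x1 starts at physical address
// 0x100000000, its first page has ppn = 0x100000000 >> 12 = 0x100000, and
// ppm_ppn2base( 0x100000 ) returns XPTR( 0x1 , vaddr_base ).
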
////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

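// The allocator implements a classical binary buddy system: free_pages_root[k] is the
// root of the list of free blocks of 2^k contiguous small pages, and free_pages_nr[k]
// counts them. The buddy of the block starting at page index i, at order k, starts at
// index (i ^ (1 << k)). Illustrative example (assumed indexes): at order 2 the buddy
// of the block starting at page 8 is the block starting at page 12, and merging them
// yields a single free block of order 3 starting at page 8.
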
///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
    page_t   * buddy;            // searched buddy block page descriptor
    uint32_t   buddy_index;      // buddy block index in pages_tbl[]
    page_t   * current;          // current (merged) block page descriptor
    uint32_t   current_index;    // current (merged) block index in pages_tbl[]
    uint32_t   current_order;    // current (merged) block order

    ppm_t    * ppm       = &LOCAL_CLUSTER->ppm;
    page_t   * pages_tbl = ppm->pages_tbl;

assert( !page_is_flag( page , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

assert( !page_is_flag( page , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    // update released page descriptor flags
    page_set_flag( page , PG_FREE );

    // search the buddy page descriptor
    // - merge with current page descriptor if found
    // - exit to release the current page descriptor if not found
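    //
    // illustrative trace (assumed free-list state): releasing the order 0 block at
    // page index 5, when page 4 is free at order 0 and the block at index 6 is free
    // at order 1:
    //   order 0 : buddy of 5 is 5 ^ 1 = 4, free and same order -> merge, current = 4
    //   order 1 : buddy of 4 is 4 ^ 2 = 6, free and same order -> merge, current = 4
    //   order 2 : buddy of 4 is 4 ^ 4 = 0, not free -> insert block 4 in free_list[2]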
    current       = page;
    current_index = (uint32_t)(page - ppm->pages_tbl);
    for( current_order = page->order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy       = pages_tbl + buddy_index;

        // exit this loop if buddy block not found
        if( !page_is_flag( buddy , PG_FREE ) ||
            (buddy->order != current_order) ) break;

        // remove buddy block from free_list
        list_unlink( &buddy->list );
        ppm->free_pages_nr[current_order] --;

        // reset order field in buddy block page descriptor
        buddy->order = 0;

        // compute merged block index in pages_tbl[]
        current_index &= buddy_index;
    }

    // update pointer and order field for merged block page descriptor
    current        = pages_tbl + current_index;
    current->order = current_order;

    // insert merged block in free list
    list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()

////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
    page_t   * current_block;
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * found_block;

#if DEBUG_PPM_ALLOC_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // build extended pointer on lock protecting local PPM
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // take lock protecting free lists
    remote_busylock_acquire( lock_xp );

    current_block = NULL;

    // find a free block equal or larger to requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    {
        if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
        {
            // get first free block in this free_list
            current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );

            // remove this block from this free_list
            list_unlink( &current_block->list );

            // register pointer on found block
            found_block = current_block;

            // update this free-list number of blocks
            ppm->free_pages_nr[current_order] --;

            // compute found block size
            current_size = (1 << current_order);

            break;
        }
    }

    if( current_block == NULL ) // return failure if no free block found
    {
        // release lock protecting free lists
        remote_busylock_release( lock_xp );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

        return NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly
    while( current_order > order )
    {
        current_order --;

        // update pointer, size, and order fields for new free block
        current_size >>= 1;
        current_block = found_block + current_size;
        current_block->order = current_order;

        // insert new free block in relevant free_list
        list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );

        // update number of blocks in free list
        ppm->free_pages_nr[current_order] ++;
    }

    // update found block page descriptor
    page_clear_flag( found_block , PG_FREE );
    page_refcount_up( found_block );
    found_block->order = order;

    // release lock protecting free lists
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_increment_pages( local_cxy , order );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    return found_block;

}  // end ppm_alloc_pages()

////////////////////////////////////
void ppm_free_pages( page_t * page )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get released block order before the merge done by ppm_free_pages_nolock(),
    // because the merge can modify the order field of the page descriptor
    uint32_t order = page->order;

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    // build extended pointer on lock protecting free_lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // get lock protecting free_pages[] array
    remote_busylock_acquire( lock_xp );

    ppm_free_pages_nolock( page );

    // release lock protecting free_lists
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( local_cxy , order );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

}  // end ppm_free_pages()

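// Illustrative usage sketch for the two local functions above (hypothetical caller,
// not part of this file): allocate four contiguous small pages in the local cluster,
// get the base address of the buffer, then release the block.
//
//     page_t * page = ppm_alloc_pages( 2 );                  // 2^2 = 4 pages
//     if( page != NULL )
//     {
//         xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
//         // ... use the buffer addressed by base_xp ...
//         ppm_free_pages( page );
//     }
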
/////////////////////////////////////////////
xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
                               uint32_t  order )
{
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * current_block;
    page_t   * found_block;

#if DEBUG_PPM_ALLOC_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // take lock protecting free lists in remote cluster
    remote_busylock_acquire( lock_xp );

    current_block = NULL;

    // find in remote cluster a free block equal or larger to requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    {
        // get local pointer on the root of relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        if( !list_remote_is_empty( cxy , root ) )
        {
            // get local pointer on first free page descriptor in remote cluster
            current_block = LIST_REMOTE_FIRST( cxy , root , page_t , list );

            // remove first free page from the free-list in remote cluster
            list_remote_unlink( cxy , &current_block->list );

            // register found block
            found_block = current_block;

            // decrement relevant free-list number of items in remote cluster
            hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );

            // compute found block size
            current_size = (1 << current_order);

            break;
        }
    }

    if( current_block == NULL ) // return failure
    {
        // release lock protecting free lists
        remote_busylock_release( lock_xp );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

        return XPTR_NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly in remote cluster
    while( current_order > order )
    {
        // update order, size, and local pointer for new free block
        current_order --;
        current_size >>= 1;
        current_block = found_block + current_size;

        // update new free block order field in remote cluster
        hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );

        // get local pointer on the root of the relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        // insert new free block in this free_list
        list_remote_add_first( cxy , root , &current_block->list );

        // update free-list number of items in remote cluster
        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), 1 );
    }

    // update refcount, flags and order fields in found block remote page descriptor
    page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
    page_remote_refcount_up( XPTR( cxy , found_block ) );
    hal_remote_s32( XPTR( cxy , &found_block->order ) , order );

    // release lock protecting free lists in remote cluster
    remote_busylock_release( lock_xp );

    // update DQDT page counter in remote cluster
    dqdt_increment_pages( cxy , order );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

    return XPTR( cxy , found_block );

}  // end ppm_remote_alloc_pages()

//////////////////////////////////////////
void ppm_remote_free_pages( cxy_t     cxy,
                            page_t  * page )
{
    xptr_t     page_xp;          // extended pointer on released page descriptor
    uint32_t   order;            // released block order
    page_t   * buddy_ptr;        // searched buddy block page descriptor
    uint32_t   buddy_order;      // searched buddy block order
    uint32_t   buddy_index;      // buddy block index in pages_tbl[]
    page_t   * current_ptr;      // current (merged) block page descriptor
    uint32_t   current_index;    // current (merged) block index in pages_tbl[]
    uint32_t   current_order;    // current (merged) block order

    // build extended pointer on released page descriptor
    page_xp = XPTR( cxy , page );

    // get released block order
    order = hal_remote_l32( XPTR( cxy , &page->order ) );

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, cxy, ppm_page2ppn( page_xp ), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // get local pointer on remote PPM pages_tbl[] array
    page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );

    // get lock protecting free_pages in remote cluster
    remote_busylock_acquire( lock_xp );

assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn( page_xp ) );

assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn( page_xp ) );

    // update released page descriptor flags
    page_remote_set_flag( page_xp , PG_FREE );

    // search the buddy page descriptor
    // - merge with current page descriptor if found
    // - exit to release the current page descriptor if not found
    current_ptr   = page;
    current_index = (uint32_t)(page - pages_tbl);
    for( current_order = order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy_ptr   = pages_tbl + buddy_index;

        // get buddy block order
        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );

        // exit loop if buddy block not found
        if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) ||
            (buddy_order != current_order) ) break;

        // remove buddy from free list in remote cluster
        list_remote_unlink( cxy , &buddy_ptr->list );
        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );

        // reset order field in buddy block page descriptor
        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );

        // compute merged block index in pages_tbl[] array
        current_index &= buddy_index;
    }

    // update merged page descriptor order field
    current_ptr = pages_tbl + current_index;
    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );

    // insert merged block into relevant free list in remote cluster
    list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );

    // release lock protecting free_pages[] array
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( cxy , order );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, cxy, ppm_page2ppn( page_xp ), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

}  // end ppm_remote_free_pages()

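// Illustrative usage sketch for the two remote functions above (hypothetical caller,
// not part of this file): allocate one small page in a possibly remote cluster cxy,
// get its PPN, then release it, without any RPC.
//
//     xptr_t page_xp = ppm_remote_alloc_pages( cxy , 0 );    // one small page
//     if( page_xp != XPTR_NULL )
//     {
//         ppn_t ppn = ppm_page2ppn( page_xp );
//         // ... map or use the physical page identified by ppn ...
//         ppm_remote_free_pages( cxy , GET_PTR( page_xp ) );
//     }
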
////////////////////////////////////
void ppm_remote_display( cxy_t cxy )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on remote PPM pages_tbl[] array
    page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // get lock protecting free lists in remote cluster
    remote_busylock_acquire( lock_xp );

    printk("\n***** PPM in cluster %x / %d pages\n",
    cxy , hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        // get number of free pages for free_list[order] in remote cluster
        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
        printk("- order = %d / free_pages = %d\t: ", order , n );

        LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            printk("%x," , page - pages_tbl );
        }

        printk("\n");
    }

    // release lock protecting free lists in remote cluster
    remote_busylock_release( lock_xp );

}  // end ppm_remote_display()

////////////////////////////////
error_t ppm_assert_order( void )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            if( page->order != order )  return -1;
        }
    }

    return 0;

}  // end ppm_assert_order()


//////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
//////////////////////////////////////////////////////////////////////////////////////

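// A page that has been modified in a mapper is registered, by ppm_page_do_dirty(),
// in the dirty list of the cluster that contains the page descriptor, and
// ppm_sync_dirty_pages() later flushes all dirty pages registered in the local
// cluster to the backing device.
// Illustrative usage sketch (hypothetical caller, not part of this file):
//
//     // after writing into the mapper page identified by page_xp
//     ppm_page_do_dirty( page_xp );
//
//     // later, in the cluster containing the page, flush the whole dirty list
//     ppm_sync_dirty_pages();
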
//////////////////////////////////////////
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) == 0 )
    {
        // set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

        // insert the page in the remote dirty list
        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

}  // end ppm_page_do_dirty()

////////////////////////////////////////////
bool_t ppm_page_undo_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) )  // page is dirty
    {
        // reset dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

        // remove the page from remote dirty list
        list_remote_unlink( page_cxy , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

}  // end ppm_page_undo_dirty()

/////////////////////////////////
void ppm_sync_dirty_pages( void )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

    // get the PPM dirty_list lock
    remote_queuelock_acquire( dirty_lock_xp );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page_t * page    = LIST_FIRST( dirty_root , page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

        // get the page lock
        remote_busylock_acquire( page_lock_xp );

        // sync the page
        vfs_fs_move_page( page_xp , false );  // from mapper to device

        // release the page lock
        remote_busylock_release( page_lock_xp );
    }

    // release the PPM dirty_list lock
    remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()