source: trunk/kernel/mm/ppm.c @ 634

/*
 * ppm.c - Per-cluster Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
[585]32#include <dqdt.h>
[567]33#include <busylock.h>
34#include <queuelock.h>
[1]35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
[567]39#include <mapper.h>
[1]40#include <ppm.h>
[606]41#include <vfs.h>
[1]42
[567]43////////////////////////////////////////////////////////////////////////////////////////
[634]44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
[567]50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base +
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

    return XPTR( page_cxy , base_ptr );

}  // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    page_t * page_ptr = ppm->pages_tbl +
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);

    return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()

///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );

}  // end ppm_ppn2page()

///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_base2ppn()

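// Usage sketch: the translations above are pure address arithmetic on the local
// pages_tbl[] and vaddr_base fields, and take no lock, so a round trip is identity.
// For instance, starting from the sixth page descriptor of the local cluster:
//
//     xptr_t page_xp = XPTR( local_cxy , &LOCAL_CLUSTER->ppm.pages_tbl[5] );
//     ppn_t  ppn     = ppm_page2ppn( page_xp );       // physical page number
//     xptr_t base_xp = ppm_ppn2base( ppn );           // base address of that page
//     xptr_t back_xp = ppm_base2page( base_xp );      // back_xp == page_xp
//
// The cluster identifier is carried by the high order bits of the physical address,
// as expressed by the CXY_FROM_PADDR / LPA_FROM_PADDR macros.
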
////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
    page_t   * buddy;            // searched buddy block page descriptor
    uint32_t   buddy_index;      // buddy block index in page_tbl[]
    page_t   * current;          // current (merged) block page descriptor
    uint32_t   current_index;    // current (merged) block index in page_tbl[]
    uint32_t   current_order;    // current (merged) block order

    ppm_t    * ppm       = &LOCAL_CLUSTER->ppm;
    page_t   * pages_tbl = ppm->pages_tbl;

assert( !page_is_flag( page , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

assert( !page_is_flag( page , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    // update released page descriptor flags
    page_set_flag( page , PG_FREE );

    // search the buddy page descriptor
    // - merge with current page descriptor if found
    // - exit to release the current page descriptor if not found
    current       = page;
    current_index = (uint32_t)(page - ppm->pages_tbl);
    for( current_order = page->order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy       = pages_tbl + buddy_index;

        // exit this loop if buddy block not found
        if( !page_is_flag( buddy , PG_FREE ) ||
            (buddy->order != current_order) ) break;

        // remove buddy block from free_list
        list_unlink( &buddy->list );
        ppm->free_pages_nr[current_order] --;

        // reset order field in buddy block page descriptor
        buddy->order = 0;

        // compute merged block index in page_tbl[]
        current_index &= buddy_index;
    }

    // update pointer and order field for merged block page descriptor
    current        = pages_tbl + current_index;
    current->order = current_order;

    // insert merged block in free list
    list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()

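// Note: the buddy of a block is found by flipping bit 'current_order' of its index in
// page_tbl[], and the merged block starts at the lower of the two indexes. For example,
// releasing an order-1 block at index 6 whose buddy is also a free order-1 block:
//
//     buddy_index   = 6 ^ (1 << 1);    // = 4 : the buddy block
//     current_index = 6 & 4;           // = 4 : the merged order-2 block starts here
//
// The loop then retries at order 2 from index 4, and stops as soon as a buddy is
// missing or CONFIG_PPM_MAX_ORDER is reached.
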
////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
    page_t   * current_block;
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * found_block;

#if DEBUG_PPM_ALLOC_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // build extended pointer on lock protecting the local PPM free_lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // take lock protecting free lists
    remote_busylock_acquire( lock_xp );

    current_block = NULL;

    // find a free block equal or larger to requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    {
        if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
        {
            // get first free block in this free_list
            current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );

            // remove this block from this free_list
            list_unlink( &current_block->list );

            // register pointer on found block
            found_block = current_block;

            // update this free-list number of blocks
            ppm->free_pages_nr[current_order] --;

            // compute found block size
            current_size = (1 << current_order);

            break;
        }
    }

    if( current_block == NULL ) // return failure if no free block found
    {
        // release lock protecting free lists
        remote_busylock_release( lock_xp );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

        return NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly
    while( current_order > order )
    {
        current_order --;

        // update pointer, size, and order fields for new free block
        current_size >>= 1;
        current_block = found_block + current_size;
        current_block->order = current_order;

        // insert new free block in relevant free_list
        list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );

        // update number of blocks in free list
        ppm->free_pages_nr[current_order] ++;
    }

    // update found block page descriptor
    page_clear_flag( found_block , PG_FREE );
    page_refcount_up( found_block );
    found_block->order = order;

    // release lock protecting free lists
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_increment_pages( local_cxy , order );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    return found_block;

}  // end ppm_alloc_pages()

////////////////////////////////////
void ppm_free_pages( page_t * page )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get the released block order before it can be modified by a buddy merge
    uint32_t order = page->order;

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    // build extended pointer on lock protecting free_lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // get lock protecting free_pages[] array
    remote_busylock_acquire( lock_xp );

    ppm_free_pages_nolock( page );

    // release lock protecting free_lists
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( local_cxy , order );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

}  // end ppm_free_pages()

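// Usage sketch: allocating a block of four contiguous physical pages in the local
// cluster, getting an extended pointer on its base, and releasing it (error handling
// reduced to a NULL test):
//
//     page_t * page = ppm_alloc_pages( 2 );                          // 1<<2 = 4 pages
//     if( page != NULL )
//     {
//         xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
//         /* ... use the block through base_xp ... */
//         ppm_free_pages( page );
//     }
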
/////////////////////////////////////////////
xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
                               uint32_t  order )
{
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * current_block;
    page_t   * found_block;

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // take lock protecting free lists in remote cluster
    remote_busylock_acquire( lock_xp );

    current_block = NULL;

    // find in remote cluster a free block equal or larger to requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    {
        // get local pointer on the root of relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        if( !list_remote_is_empty( cxy , root ) )
        {
            // get local pointer on first free page descriptor in remote cluster
            current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );

            // remove first free page from the free-list in remote cluster
            list_remote_unlink( cxy , &current_block->list );

            // register found block
            found_block = current_block;

            // decrement relevant free-list number of items in remote cluster
            hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );

            // compute found block size
            current_size = (1 << current_order);

            break;
        }
    }

    if( current_block == NULL ) // return failure
    {
        // release lock protecting free lists
        remote_busylock_release( lock_xp );

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

        return XPTR_NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly in remote cluster
    while( current_order > order )
    {
        // update order, size, and local pointer for new free block
        current_order --;
        current_size >>= 1;
        current_block = found_block + current_size;

        // update new free block order field in remote cluster
        hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );

        // get local pointer on the root of the relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        // insert new free block in this free_list
        list_remote_add_first( cxy , root, &current_block->list );

        // update free-list number of items in remote cluster
        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
    }

    // update refcount, flags and order fields in found block
    page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
    page_remote_refcount_up( XPTR( cxy , found_block ) );
    hal_remote_s32( XPTR( cxy , &found_block->order ) , order );

    // release lock protecting free lists in remote cluster
    remote_busylock_release( lock_xp );

    // update DQDT page counter in remote cluster
    dqdt_increment_pages( cxy , order );

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
#endif

#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

    return XPTR( cxy , found_block );

}  // end ppm_remote_alloc_pages()

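// Usage sketch: the remote allocator returns an extended pointer, so a caller holding
// a target cluster identifier (here a hypothetical variable cxy) must split it before
// releasing the block:
//
//     xptr_t page_xp = ppm_remote_alloc_pages( cxy , 0 );     // one page in cluster cxy
//     if( page_xp != XPTR_NULL )
//     {
//         ppm_remote_free_pages( GET_CXY( page_xp ) , GET_PTR( page_xp ) );
//     }
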
//////////////////////////////////////////
void ppm_remote_free_pages( cxy_t     cxy,
                            page_t  * page )
{
    xptr_t     page_xp;          // extended pointer on released page descriptor
    uint32_t   order;            // released block order
    page_t   * buddy_ptr;        // searched buddy block page descriptor
    uint32_t   buddy_order;      // searched buddy block order
    uint32_t   buddy_index;      // buddy block index in page_tbl[]
    page_t   * current_ptr;      // current (merged) block page descriptor
    uint32_t   current_index;    // current (merged) block index in page_tbl[]
    uint32_t   current_order;    // current (merged) block order

    // build extended pointer on released page descriptor
    page_xp = XPTR( cxy , page );

    // get released page order
    order = hal_remote_l32( XPTR( cxy , &page->order ) );

#if DEBUG_PPM_REMOTE_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) / cxy %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, cxy, ppm_page2ppn( page_xp ), cycle );
#endif

#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // get local pointer on remote PPM page_tbl[] array
    page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );

    // get lock protecting free_pages in remote cluster
    remote_busylock_acquire( lock_xp );

assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn( page_xp ) );

assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn( page_xp ) );

    // update released page descriptor flags
    page_remote_set_flag( page_xp , PG_FREE );

    // search the buddy page descriptor
    // - merge with current page descriptor if found
    // - exit to release the current page descriptor if not found
    current_ptr   = page;
    current_index = (uint32_t)(page - ppm->pages_tbl);
    for( current_order = order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy_ptr   = pages_tbl + buddy_index;

        // get buddy block order
        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );

        // exit loop if buddy block not found
        if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) ||
            (buddy_order != current_order) ) break;

        // remove buddy from free list in remote cluster
        list_remote_unlink( cxy , &buddy_ptr->list );
        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );

        // reset order field in buddy block page descriptor
        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );

        // compute merged block index in page_tbl[] array
        current_index &= buddy_index;
    }

    // update merged page descriptor order field
    current_ptr = pages_tbl + current_index;
    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );

    // insert merged block into relevant free list in remote cluster
    list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );

    // release lock protecting free_pages[] array
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( cxy , order );

#if DEBUG_PPM_REMOTE_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, cxy, ppm_page2ppn( page_xp ), cycle );
#endif

#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

}  // end ppm_remote_free_pages()

////////////////////////////////////
void ppm_remote_display( cxy_t cxy )
{
    uint32_t       order;
    list_entry_t * iter;
    xptr_t         page_xp;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // build extended pointer on remote TXT0 lock
    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get PPM lock
    remote_busylock_acquire( ppm_lock_xp );

    // get TXT0 lock
    remote_busylock_acquire( txt_lock_xp );

    nolock_printk("\n***** PPM in cluster %x / %d pages\n",
                  cxy , hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        // get number of free pages for free_list[order] in remote cluster
        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );

        nolock_printk("- order = %d / n = %d\t: ", order , n );

        LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
        {
            // build extended pointer on page descriptor
            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );

            // display PPN
            nolock_printk("%x," , ppm_page2ppn( page_xp ) );
        }

        nolock_printk("\n");
    }

    // release TXT0 lock
    remote_busylock_release( txt_lock_xp );

    // release PPM lock
    remote_busylock_release( ppm_lock_xp );

}  // end ppm_remote_display()

////////////////////////////////
error_t ppm_assert_order( void )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            if( page->order != order )  return -1;
        }
    }

    return 0;

}  // end ppm_assert_order()

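// Usage sketch: ppm_assert_order() only checks the local free lists; a debug path could
// wrap it in an assert (the message below is illustrative):
//
//     assert( (ppm_assert_order() == 0) ,
//             "corrupted PPM free lists in cluster %x\n" , local_cxy );
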

//////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
//////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) == 0 )
    {
        // set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

        // insert the page in the remote dirty list
        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

} // end ppm_page_do_dirty()

////////////////////////////////////////////
bool_t ppm_page_undo_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) )  // page is dirty
    {
        // reset dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

        // remove the page from remote dirty list
        list_remote_unlink( page_cxy , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

}  // end ppm_page_undo_dirty()

/////////////////////////////////
void ppm_sync_dirty_pages( void )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

    // get the PPM dirty_list lock
    remote_queuelock_acquire( dirty_lock_xp );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page_t * page    = LIST_FIRST( dirty_root , page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

        // get the page lock
        remote_busylock_acquire( page_lock_xp );

        // sync the page
        vfs_fs_move_page( page_xp , false );  // from mapper to device

        // release the page lock
        remote_busylock_release( page_lock_xp );
    }

    // release the PPM dirty_list lock
    remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()

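// Usage sketch: a mapper that modifies a page marks it dirty in the cluster owning the
// page, and that cluster later flushes its whole dirty list (page_xp is a hypothetical
// extended pointer on a page belonging to a mapper):
//
//     if( ppm_page_do_dirty( page_xp ) )   // true if the page was not already dirty
//     {
//         /* the page will be written back by the next ppm_sync_dirty_pages() call */
//     }
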