source: trunk/kernel/mm/ppm.c @ 635

Last change on this file since 635 was 635, checked in by alain, 16 months ago

This version is a major evolution: The physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This has been done to cure a dead-lock in case of concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 25.0 KB
RevLine 
[1]1/*
2 * ppm.c - Per-cluster Physical Pages Manager implementation
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
[632]5 *          Alain Greiner    (2016,2017,2018,2019)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH.is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH.is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
[585]32#include <dqdt.h>
[567]33#include <busylock.h>
34#include <queuelock.h>
[1]35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
[567]39#include <mapper.h>
[1]40#include <ppm.h>
[606]41#include <vfs.h>
[1]42
[567]43////////////////////////////////////////////////////////////////////////////////////////
[634]44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
[567]50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
[50]53/////////////////////////////////////////////
[315]54inline xptr_t ppm_page2base( xptr_t page_xp )
[1]55{
[315]56        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
[1]57
[315]58    cxy_t    page_cxy = GET_CXY( page_xp );
[437]59    page_t * page_ptr = GET_PTR( page_xp );
[315]60
[406]61   void   * base_ptr = ppm->vaddr_base + 
62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
63
[315]64        return XPTR( page_cxy , base_ptr );
65
66} // end ppm_page2base()
67
68/////////////////////////////////////////////
69inline xptr_t ppm_base2page( xptr_t base_xp )
[1]70{
[315]71        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
[1]72
[315]73    cxy_t    base_cxy = GET_CXY( base_xp );
[437]74    void   * base_ptr = GET_PTR( base_xp );
[315]75
76        page_t * page_ptr = ppm->pages_tbl + 
77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
78
79        return XPTR( base_cxy , page_ptr );
80
81}  // end ppm_base2page()
82
83
84
[50]85///////////////////////////////////////////
[315]86inline ppn_t ppm_page2ppn( xptr_t page_xp )
87{
88        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
89
90    cxy_t    page_cxy = GET_CXY( page_xp );
[437]91    page_t * page_ptr = GET_PTR( page_xp );
[315]92
93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
94
[437]95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]96
97}  // end hal_page2ppn()
98
99///////////////////////////////////////
100inline xptr_t ppm_ppn2page( ppn_t ppn )
101{
[437]102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
[315]103
[437]104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]105
[437]106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]108
[437]109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
[315]110
111}  // end hal_ppn2page
112
113
114
115///////////////////////////////////////
116inline xptr_t ppm_ppn2base( ppn_t ppn )
117{
[437]118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
[315]119   
[437]120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]121
[437]122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
123    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]124
[437]125        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
[315]126
127}  // end ppm_ppn2base()
128
129///////////////////////////////////////////
130inline ppn_t ppm_base2ppn( xptr_t base_xp )
131{
132        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
133
134    cxy_t    base_cxy = GET_CXY( base_xp );
[437]135    void   * base_ptr = GET_PTR( base_xp );
[315]136
137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
138
[437]139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]140
141}  // end ppm_base2ppn()
142
143
[567]144////////////////////////////////////////////////////////////////////////////////////////
145//     functions to  allocate / release  physical pages
146////////////////////////////////////////////////////////////////////////////////////////
[315]147
148///////////////////////////////////////////
[50]149void ppm_free_pages_nolock( page_t * page )
[1]150{
[632]151        page_t   * buddy;            // searched buddy block page descriptor
152        uint32_t   buddy_index;      // buddy bloc index in page_tbl[]
153        page_t   * current;          // current (merged) block page descriptor
154        uint32_t   current_index;    // current (merged) block index in page_tbl[]
155        uint32_t   current_order;    // current (merged) block order
[7]156
[160]157        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
158        page_t   * pages_tbl   = ppm->pages_tbl;
[1]159
[632]160assert( !page_is_flag( page , PG_FREE ) ,
161"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
[177]162
[632]163assert( !page_is_flag( page , PG_RESERVED ) ,
164"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
[407]165
[160]166        // update released page descriptor flags
[1]167        page_set_flag( page , PG_FREE );
168
[160]169        // search the buddy page descriptor
170        // - merge with current page descriptor if found
171        // - exit to release the current page descriptor if not found
[632]172        current       = page;
[160]173        current_index = (uint32_t)(page - ppm->pages_tbl);
[18]174        for( current_order = page->order ;
[160]175             current_order < CONFIG_PPM_MAX_ORDER ;
176             current_order++ )
177        {
[7]178                buddy_index = current_index ^ (1 << current_order);
179                buddy       = pages_tbl + buddy_index;
[18]180
[632]181        // exit this loop if buddy block not found
182                if( !page_is_flag( buddy , PG_FREE ) || 
183            (buddy->order != current_order) ) break;
[1]184
[632]185                // remove buddy block from free_list
[7]186                list_unlink( &buddy->list );
[1]187                ppm->free_pages_nr[current_order] --;
[18]188
[632]189        // reset order field in buddy block page descriptor
[7]190                buddy->order = 0;
[632]191
192                // compute merged block index in page_tbl[]
[7]193                current_index &= buddy_index;
[1]194        }
[18]195
[632]196        // update pointer and order field for merged block page descriptor
[7]197        current        = pages_tbl + current_index;
198        current->order = current_order;
[1]199
[632]200        // insert merged block in free list
[7]201        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
[1]202        ppm->free_pages_nr[current_order] ++;
203
[433]204}  // end ppm_free_pages_nolock()
205
[1]206////////////////////////////////////////////
207page_t * ppm_alloc_pages( uint32_t   order )
208{
[632]209        page_t   * current_block;
[160]210        uint32_t   current_order;
[1]211        uint32_t   current_size;
[632]212        page_t   * found_block; 
[551]213
[635]214    thread_t * this = CURRENT_THREAD;
215
[438]216#if DEBUG_PPM_ALLOC_PAGES
[433]217uint32_t cycle = (uint32_t)hal_get_cycles();
[438]218if( DEBUG_PPM_ALLOC_PAGES < cycle )
[632]219printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
[634]220__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
[433]221#endif
[1]222
[438]223#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
224if( DEBUG_PPM_ALLOC_PAGES < cycle )
[632]225ppm_remote_display( local_cxy );
[433]226#endif
227
[160]228        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
[1]229
[611]230// check order
231assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
[1]232
[632]233    //build extended pointer on lock protecting remote PPM
234    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
[1]235
[160]236        // take lock protecting free lists
[632]237        remote_busylock_acquire( lock_xp );
[1]238
[632]239        current_block = NULL;
[635]240    current_order = order;
[632]241
[635]242        // search a free block equal or larger than requested size
243        while( current_order < CONFIG_PPM_MAX_ORDER )
[1]244        {
[635]245        // get local pointer on the root of relevant free_list (same in all clusters)
246        list_entry_t * root = &ppm->free_pages_root[current_order];
247
248                if( !list_is_empty( root ) )
[1]249                {
[632]250            // get first free block in this free_list
[635]251                        current_block = LIST_FIRST( root , page_t , list );
[632]252
253            // remove this block from this free_list
254                        list_unlink( &current_block->list );
[635]255                ppm->free_pages_nr[current_order] --;
[632]256
257            // register pointer on found block
258            found_block = current_block;
259
260            // compute found block size
261                current_size = (1 << current_order);
262
263                        break; 
[1]264                }
[635]265
266        // increment loop index
267        current_order++;
[1]268        }
269
[632]270        if( current_block == NULL ) // return failure if no free block found
[1]271        {
[160]272                // release lock protecting free lists
[632]273                remote_busylock_release( lock_xp );
[1]274
[635]275        printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
276        __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
[433]277
[160]278                return NULL;
279        }
[18]280
[1]281
[632]282        // split the found block in smaller sub-blocks if required
[160]283        // and update the free-lists accordingly
[1]284        while( current_order > order )
285        {
[635]286        // update size and order
[1]287                current_order --;
[635]288                current_size >>= 1;
[632]289
[635]290        // update order fiels in new free block
[632]291                current_block = found_block + current_size;
292                current_block->order = current_order;
[18]293
[632]294        // insert new free block in relevant free_list
295                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
[1]296                ppm->free_pages_nr[current_order] ++;
297        }
[18]298
[632]299        // update found block page descriptor
300        page_clear_flag( found_block , PG_FREE );
301        page_refcount_up( found_block );
302        found_block->order = order;
[1]303
[160]304        // release lock protecting free lists
[632]305        remote_busylock_release( lock_xp );
[18]306
[585]307    // update DQDT
[632]308    dqdt_increment_pages( local_cxy , order );
[585]309
[438]310#if DEBUG_PPM_ALLOC_PAGES
[433]311cycle = (uint32_t)hal_get_cycles();
[438]312if( DEBUG_PPM_ALLOC_PAGES < cycle )
[632]313printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
[611]314__FUNCTION__, this->process->pid, this->trdid, 
[635]3151<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
[433]316#endif
[7]317
[611]318#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
319if( DEBUG_PPM_ALLOC_PAGES < cycle )
[632]320ppm_remote_display( local_cxy );
[611]321#endif
322
[632]323        return found_block;
[1]324
[433]325}  // end ppm_alloc_pages()
[1]326
[433]327
[1]328////////////////////////////////////
329void ppm_free_pages( page_t * page )
330{
331        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[18]332
[438]333#if DEBUG_PPM_FREE_PAGES
[632]334thread_t * this  = CURRENT_THREAD;
335uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]336if( DEBUG_PPM_FREE_PAGES < cycle )
[632]337printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
[611]338__FUNCTION__, this->process->pid, this->trdid, 
[632]3391<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
[433]340#endif
341
[438]342#if(DEBUG_PPM_FREE_PAGES & 0x1)
343if( DEBUG_PPM_FREE_PAGES < cycle )
[632]344ppm_remote_display( local_cxy );
[433]345#endif
346
[632]347    //build extended pointer on lock protecting free_lists
348    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
349
[160]350        // get lock protecting free_pages[] array
[632]351        remote_busylock_acquire( lock_xp );
[1]352
[18]353        ppm_free_pages_nolock( page );
[1]354
[632]355        // release lock protecting free_lists
356        remote_busylock_release( lock_xp );
[433]357
[585]358    // update DQDT
[632]359    dqdt_decrement_pages( local_cxy , page->order );
[585]360
[438]361#if DEBUG_PPM_FREE_PAGES
[433]362cycle = (uint32_t)hal_get_cycles();
[438]363if( DEBUG_PPM_FREE_PAGES < cycle )
[632]364printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
[611]365__FUNCTION__, this->process->pid, this->trdid, 
[632]3661<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
[433]367#endif
368
[611]369#if(DEBUG_PPM_FREE_PAGES & 0x1)
370if( DEBUG_PPM_FREE_PAGES < cycle )
[632]371ppm_remote_display( local_cxy );
[611]372#endif
373
[567]374}  // end ppm_free_pages()
[1]375
[632]376/////////////////////////////////////////////
[635]377void * ppm_remote_alloc_pages( cxy_t     cxy,
[632]378                               uint32_t  order )
[1]379{
[632]380        uint32_t   current_order;
381        uint32_t   current_size;
382    page_t   * current_block;   
383    page_t   * found_block;
384
[635]385    thread_t * this  = CURRENT_THREAD;
386
[634]387#if DEBUG_PPM_REMOTE_ALLOC_PAGES
[632]388uint32_t   cycle = (uint32_t)hal_get_cycles();
[634]389if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[632]390printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
391__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
392#endif
393
[634]394#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
395if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[632]396ppm_remote_display( cxy );
397#endif
398
399// check order
400assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
401
402    // get local pointer on PPM (same in all clusters)
403        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
404
405    //build extended pointer on lock protecting remote PPM
406    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
407
408        // take lock protecting free lists in remote cluster
409        remote_busylock_acquire( lock_xp );
410
411    current_block = NULL;   
[635]412    current_order = order;
[632]413
[635]414    // search a free block equal or larger than requested size
415    while( current_order < CONFIG_PPM_MAX_ORDER )
416    {
417        // get local pointer on the root of relevant free_list (same in all clusters)
[632]418        list_entry_t * root = &ppm->free_pages_root[current_order];
419
[635]420                if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
[632]421                {
422            // get local pointer on first free page descriptor in remote cluster
423                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
424
425            // remove first free page from the free-list in remote cluster
426                        list_remote_unlink( cxy , &current_block->list );
[635]427                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
[632]428
429            // register found block
430            found_block = current_block;
431
432            // compute found block size
433                current_size = (1 << current_order);
434
435                        break;
436                }
[635]437
438        // increment loop index
439        current_order++;
[632]440        }
441
442        if( current_block == NULL ) // return failure
443        {
444                // release lock protecting free lists
445                remote_busylock_release( lock_xp );
446
[635]447        printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
448        __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
[632]449
450                return XPTR_NULL;
451        }
452
453        // split the found block in smaller sub-blocks if required
454        // and update the free-lists accordingly in remote cluster
455        while( current_order > order )
456        {
[635]457        // update order and size
[632]458                current_order --;
459                current_size >>= 1;
460
461        // update new free block order field in remote cluster
[635]462                current_block = found_block + current_size;
[632]463                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
464
465        // get local pointer on the root of the relevant free_list in remote cluster 
466        list_entry_t * root = &ppm->free_pages_root[current_order];
467
468        // insert new free block in this free_list
469                list_remote_add_first( cxy , root, &current_block->list );
470
471        // update free-list number of items in remote cluster
472        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
473        }
474
[634]475        // update refcount, flags and order fields in found block
[632]476        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
477        page_remote_refcount_up( XPTR( cxy , found_block ) );
478        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
479   
480        // release lock protecting free lists in remote cluster
481        remote_busylock_release( lock_xp );
482
483    // update DQDT page counter in remote cluster
484    dqdt_increment_pages( cxy , order );
485
[634]486#if DEBUG_PPM_REMOTE_ALLOC_PAGES
[632]487cycle = (uint32_t)hal_get_cycles();
[634]488if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[632]489printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
490__FUNCTION__, this->process->pid, this->trdid, 
4911<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), cxy, cycle );
492#endif
493
[634]494#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
495if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[632]496ppm_remote_display( cxy );
497#endif
498
[635]499        return found_block;
[632]500
501}  // end ppm_remote_alloc_pages()
502
503//////////////////////////////////////////
504void ppm_remote_free_pages( cxy_t     cxy,
505                            page_t  * page )
506{
507    xptr_t     page_xp;          // extended pointer on released page descriptor
508    uint32_t   order;            // released block order
509        page_t   * buddy_ptr;        // searched buddy block page descriptor
510    uint32_t   buddy_order;      // searched buddy block order
511        uint32_t   buddy_index;      // buddy block index in page_tbl[]
512        page_t   * current_ptr;      // current (merged) block page descriptor
513        uint32_t   current_index;    // current (merged) block index in page_tbl[]
514        uint32_t   current_order;    // current (merged) block order
515
[634]516#if DEBUG_PPM_REMOTE_FREE_PAGES
[632]517thread_t * this  = CURRENT_THREAD;
518uint32_t   cycle = (uint32_t)hal_get_cycles();
[634]519if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
520printk("\n[%s] thread[%x,%x] enter for %d page(s) / cxy %x / ppn %x / cycle %d\n",
[632]521__FUNCTION__, this->process->pid, this->trdid, 
5221<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
523#endif
524
[634]525#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
526if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[632]527ppm_remote_display( cxy );
528#endif
529
530    // build extended pointer on released page descriptor
531    page_xp = XPTR( cxy , page );
532   
533    // get released page order
534    order = hal_remote_l32( XPTR( cxy , &page->order ) );
535
536    // get local pointer on PPM (same in all clusters)
537        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
538
539    // build extended pointer on lock protecting remote PPM
540    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
541
542    // get local pointer on remote PPM page_tbl[] array
543        page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );
544
545        // get lock protecting free_pages in remote cluster
546        remote_busylock_acquire( lock_xp );
547
548assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
549"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
550
551assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
552"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
553
554        // update released page descriptor flags
555        page_remote_set_flag( page_xp , PG_FREE );
556
557        // search the buddy page descriptor
558        // - merge with current page descriptor if found
559        // - exit to release the current page descriptor if not found
560        current_ptr   = page;
561        current_index = (uint32_t)(page - ppm->pages_tbl);
562        for( current_order = order ;
563             current_order < CONFIG_PPM_MAX_ORDER ;
564             current_order++ )
565        {
566                buddy_index = current_index ^ (1 << current_order);
567                buddy_ptr   = pages_tbl + buddy_index;
568
569        // get buddy block order
570        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );
571
572        // exit loop if buddy block not found
573                if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) || 
574            (buddy_order != current_order) ) break;
575
576                // remove buddy from free list in remote cluster
577                list_remote_unlink( cxy , &buddy_ptr->list );
578        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );
579
580        // reset order field in buddy block page descriptor
581        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );
582
583                // compute merged block index in page_tbl[] array
584                current_index &= buddy_index;
585        }
586
587        // update merged page descriptor order field
588        current_ptr = pages_tbl + current_index;
589    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );
590
591        // insert merged block into relevant free list in remote cluster
592        list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
593    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );
594
595        // release lock protecting free_pages[] array
596        remote_busylock_release( lock_xp );
597
598    // update DQDT
599    dqdt_decrement_pages( cxy , page->order );
600
[634]601#if DEBUG_PPM_REMOTE_FREE_PAGES
[632]602cycle = (uint32_t)hal_get_cycles();
[634]603if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[632]604printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
605__FUNCTION__, this->process->pid, this->trdid, 
6061<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
607#endif
608
[634]609#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[632]611ppm_remote_display( cxy );
612#endif
613
614}  // end ppm_remote_free_pages()
615
616////////////////////////////////////
617void ppm_remote_display( cxy_t cxy )
618{
[1]619        uint32_t       order;
620        list_entry_t * iter;
[634]621    xptr_t         page_xp;
[1]622
[433]623    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
624
[632]625    // build extended pointer on lock protecting remote PPM
[634]626    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
[1]627
[634]628    // get pointers on TXT0 chdev
629    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
630    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
631    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[632]632
[634]633    // build extended pointer on remote TXT0 lock
634    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[18]635
[634]636        // get PPM lock
637        remote_busylock_acquire( ppm_lock_xp );
638
639    // get TXT0 lock
640    remote_busylock_acquire( txt_lock_xp );
641
642        nolock_printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );
643
[1]644        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
645        {
[632]646        // get number of free pages for free_list[order] in remote cluster
647        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
[18]648
[634]649                nolock_printk("- order = %d / n = %d\t: ", order , n );
650
[632]651                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
[1]652                {
[634]653            // build extended pointer on page descriptor
654            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
655
656            // display PPN
657                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
[1]658                }
[18]659
[634]660                nolock_printk("\n");
[1]661        }
662
[634]663        // release TXT0 lock
664        remote_busylock_release( txt_lock_xp );
665
666        // release PPM lock
667        remote_busylock_release( ppm_lock_xp );
[160]668}
[1]669
[632]670////////////////////////////////
671error_t ppm_assert_order( void )
[1]672{
673        uint32_t       order;
674        list_entry_t * iter;
675        page_t       * page;
[18]676
[632]677    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
678
[407]679        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
[1]680        {
681                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
[18]682
[1]683                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
684                {
685                        page = LIST_ELEMENT( iter , page_t , list );
[160]686                        if( page->order != order )  return -1;
[1]687                }
688        }
689
[160]690        return 0;
691}
[53]692
[567]693
694//////////////////////////////////////////////////////////////////////////////////////
695//     functions to handle  dirty physical pages
696//////////////////////////////////////////////////////////////////////////////////////
697
[606]698//////////////////////////////////////////
699bool_t ppm_page_do_dirty( xptr_t page_xp )
[567]700{
701        bool_t done = false;
702
[606]703    // get page cluster and local pointer
704    page_t * page_ptr = GET_PTR( page_xp );
705    cxy_t    page_cxy = GET_CXY( page_xp );
706
707    // get local pointer on PPM (same in all clusters)
[567]708        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
709
[606]710    // build extended pointers on page lock, page flags, and PPM dirty list lock
711    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
712    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
713    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
714           
715        // lock the remote PPM dirty_list
716        remote_queuelock_acquire( dirty_lock_xp );
[567]717
[606]718    // lock the remote page
719    remote_busylock_acquire( page_lock_xp );
720
721    // get remote page flags
722    uint32_t flags = hal_remote_l32( page_flags_xp );
723
724        if( (flags & PG_DIRTY) == 0 )
[567]725        {
726                // set dirty flag in page descriptor
[606]727        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
[567]728
[632]729                // insert the page in the remote dirty list
730        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
[606]731
[567]732                done = true;
733        }
734
[606]735    // unlock the remote page
736    remote_busylock_release( page_lock_xp );
[567]737
[606]738        // unlock the remote PPM dirty_list
739        remote_queuelock_release( dirty_lock_xp );
740
[567]741        return done;
742
[606]743} // end ppm_page_do_dirty()
744
745////////////////////////////////////////////
746bool_t ppm_page_undo_dirty( xptr_t page_xp )
[567]747{
748        bool_t done = false;
749
[606]750    // get page cluster and local pointer
751    page_t * page_ptr = GET_PTR( page_xp );
752    cxy_t    page_cxy = GET_CXY( page_xp );
753
754    // get local pointer on PPM (same in all clusters)
[567]755        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
756
[606]757    // build extended pointers on page lock, page flags, and PPM dirty list lock
758    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
759    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
760    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
761           
762        // lock the remote PPM dirty_list
763        remote_queuelock_acquire( XPTR( page_cxy , &ppm->dirty_lock ) );
[567]764
[606]765    // lock the remote page
766    remote_busylock_acquire( page_lock_xp );
767
768    // get remote page flags
769    uint32_t flags = hal_remote_l32( page_flags_xp );
770
771        if( (flags & PG_DIRTY) )  // page is dirty
[567]772        {
[606]773                // reset dirty flag in page descriptor
774        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
[567]775
[632]776        // remove the page from remote dirty list
777        list_remote_unlink( page_cxy , &page_ptr->list );
[606]778
[567]779                done = true;
780        }
781
[606]782    // unlock the remote page
783    remote_busylock_release( page_lock_xp );
[567]784
[606]785        // unlock the remote PPM dirty_list
786        remote_queuelock_release( dirty_lock_xp );
787
[567]788        return done;
789
[606]790}  // end ppm_page_undo_dirty()
791
792/////////////////////////////////
793void ppm_sync_dirty_pages( void )
[567]794{
[606]795        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[567]796
[606]797    // get local pointer on PPM dirty_root
798    list_entry_t * dirty_root = &ppm->dirty_root;
799
800    // build extended pointer on PPM dirty_lock
801    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
802
[567]803        // get the PPM dirty_list lock
[606]804        remote_queuelock_acquire( dirty_lock_xp );
[567]805
806        while( !list_is_empty( &ppm->dirty_root ) )
807        {
[606]808                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
809        xptr_t   page_xp = XPTR( local_cxy , page );
[567]810
[606]811        // build extended pointer on page lock
812        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
813
[567]814                // get the page lock
[606]815                remote_busylock_acquire( page_lock_xp );
[567]816
817                // sync the page
[606]818                vfs_fs_move_page( page_xp , false );  // from mapper to device
[567]819
820                // release the page lock
[606]821                remote_busylock_release( page_lock_xp );
[567]822        }
823
824        // release the PPM dirty_list lock
[606]825        remote_queuelock_release( dirty_lock_xp );
[567]826
[606]827}  // end ppm_sync_dirty_pages()
828
Note: See TracBrowser for help on using the repository browser.