source: trunk/kernel/mm/ppm.c @ 657

Last change on this file since 657 was 657, checked in by alain, 4 years ago

Introduce remote_buf.c/.h & socket.c/.h files.
Update dev_nic.c/.h files.

File size: 25.8 KB
RevLine 
[1]1/*
[636]2 * ppm.c -  Physical Pages Manager implementation
[1]3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
[657]5 *          Alain Greiner    (2016,2017,2018,2019,2020)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
[1]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[1]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
[585]32#include <dqdt.h>
[567]33#include <busylock.h>
34#include <queuelock.h>
[1]35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
[567]39#include <mapper.h>
[1]40#include <ppm.h>
[606]41#include <vfs.h>
[1]42
[567]43////////////////////////////////////////////////////////////////////////////////////////
[634]44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
[567]50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
[50]53/////////////////////////////////////////////
[315]54inline xptr_t ppm_page2base( xptr_t page_xp )
[1]55{
[315]56        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
[1]57
[315]58    cxy_t    page_cxy = GET_CXY( page_xp );
[437]59    page_t * page_ptr = GET_PTR( page_xp );
[315]60
[406]61   void   * base_ptr = ppm->vaddr_base + 
62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
63
[315]64        return XPTR( page_cxy , base_ptr );
65
66} // end ppm_page2base()
67
68/////////////////////////////////////////////
69inline xptr_t ppm_base2page( xptr_t base_xp )
[1]70{
[315]71        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
[1]72
[315]73    cxy_t    base_cxy = GET_CXY( base_xp );
[437]74    void   * base_ptr = GET_PTR( base_xp );
[315]75
76        page_t * page_ptr = ppm->pages_tbl + 
77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
78
79        return XPTR( base_cxy , page_ptr );
80
81}  // end ppm_base2page()
82
83
84
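/* Illustrative example : a minimal sketch of the page <-> base translation above,
 * assuming 4 Kbytes pages (CONFIG_PPM_PAGE_SHIFT == 12). The index of a page
 * descriptor in pages_tbl[] and the offset of the page base in the virtual segment
 * differ only by a shift, so the two functions are exact inverses:
 *
 *     ppm_t  * ppm     = &LOCAL_CLUSTER->ppm;
 *     page_t * page    = &ppm->pages_tbl[5];                        // descriptor #5
 *     xptr_t   base_xp = ppm_page2base( XPTR( local_cxy , page ) );
 *     // base pointer == ppm->vaddr_base + (5 << 12) == vaddr_base + 0x5000
 *     xptr_t   back_xp = ppm_base2page( base_xp );
 *     // GET_PTR( back_xp ) == page   and   GET_CXY( back_xp ) == local_cxy
 */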
[50]85///////////////////////////////////////////
[315]86inline ppn_t ppm_page2ppn( xptr_t page_xp )
87{
88        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
89
90    cxy_t    page_cxy = GET_CXY( page_xp );
[437]91    page_t * page_ptr = GET_PTR( page_xp );
[315]92
93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
94
[437]95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]96
97}  // end ppm_page2ppn()
98
99///////////////////////////////////////
100inline xptr_t ppm_ppn2page( ppn_t ppn )
101{
[437]102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
[315]103
[437]104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]105
[437]106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]108
[437]109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
[315]110
111}  // end ppm_ppn2page()
112
113
114
115///////////////////////////////////////
116inline xptr_t ppm_ppn2base( ppn_t ppn )
117{
[437]118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
[315]119   
[437]120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]121
[437]122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
123    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]124
[437]125        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
[315]126
127}  // end ppm_ppn2base()
128
129///////////////////////////////////////////
130inline ppn_t ppm_base2ppn( xptr_t base_xp )
131{
132        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
133
134    cxy_t    base_cxy = GET_CXY( base_xp );
[437]135    void   * base_ptr = GET_PTR( base_xp );
[315]136
137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
138
[437]139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]140
141}  // end ppm_base2ppn()
142
143
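/* Illustrative example : a worked case for the ppn based translations above, assuming
 * 4 Kbytes pages and a hypothetical physical address layout where the low 24 bits are
 * the local physical address (lpa) and the upper bits are the cluster identifier (cxy):
 *
 *     ppn   = 0x41002
 *     paddr = (paddr_t)ppn << 12      = 0x41002000
 *     cxy   = CXY_FROM_PADDR( paddr ) = 0x41
 *     lpa   = LPA_FROM_PADDR( paddr ) = 0x002000
 *     ppm_ppn2page( ppn ) == XPTR( 0x41 , &pages_tbl[2] )        // 0x002000 >> 12 == 2
 *     ppm_ppn2base( ppn ) == XPTR( 0x41 , vaddr_base + 0x2000 )
 */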
[567]144////////////////////////////////////////////////////////////////////////////////////////
145//     functions to  allocate / release  physical pages
146////////////////////////////////////////////////////////////////////////////////////////
[315]147
148///////////////////////////////////////////
[50]149void ppm_free_pages_nolock( page_t * page )
[1]150{
[636]151        page_t   * buddy;               // searched buddy page descriptor
152        uint32_t   buddy_index;         // buddy page index in page_tbl[]
[656]153        page_t   * current_ptr;         // current (merged) page descriptor
[636]154        uint32_t   current_index;       // current (merged) page index in page_tbl[]
155        uint32_t   current_order;       // current (merged) page order
[7]156
[160]157        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
158        page_t   * pages_tbl   = ppm->pages_tbl;
[1]159
[632]160assert( !page_is_flag( page , PG_FREE ) ,
[636]161"page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
[177]162
[632]163assert( !page_is_flag( page , PG_RESERVED ) ,
[636]164"reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
[407]165
[636]166        // set FREE flag in released page descriptor
[1]167        page_set_flag( page , PG_FREE );
168
[636]169    // initialise loop variables
[656]170    current_ptr   = page;
[636]171    current_order = page->order;
172        current_index = page - ppm->pages_tbl;
173
[160]174        // search the buddy page descriptor
[636]175        // - merge with current page if buddy found
176        // - exit to release the current page when buddy not found
177    while( current_order < CONFIG_PPM_MAX_ORDER )
178    {
179        // compute buddy page index and page descriptor
[7]180                buddy_index = current_index ^ (1 << current_order);
181                buddy       = pages_tbl + buddy_index;
[636]182       
183        // exit loop if buddy not found in current free list
184                if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
[18]185
[636]186        // remove buddy page from current free_list
[7]187                list_unlink( &buddy->list );
[1]188                ppm->free_pages_nr[current_order] --;
[18]189
[636]190        // reset order field in buddy page descriptor
[7]191                buddy->order = 0;
[632]192
[656]193                // compute next values for loop variables
[7]194                current_index &= buddy_index;
[636]195        current_order++;
[656]196        current_ptr = pages_tbl + current_index; 
[636]197    }
198
199        // update order field for merged page descriptor
[656]200        current_ptr->order = current_order;
[1]201
[636]202        // insert merged page in relevant free list
[656]203        list_add_first( &ppm->free_pages_root[current_order] , &current_ptr->list );
[1]204        ppm->free_pages_nr[current_order] ++;
205
[433]206}  // end ppm_free_pages_nolock()
207
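/* Illustrative example : a short worked case of the buddy merge loop above
 * (page indexes are purely illustrative). Releasing an order-2 block starting
 * at page index 12 :
 *
 *     order 2 : buddy_index = 12 ^ (1 << 2) = 8
 *               if pages_tbl[8] is FREE with order == 2, it is unlinked and
 *               current_index = 12 & 8 = 8      // merged order-3 block starts at 8
 *     order 3 : buddy_index = 8 ^ (1 << 3) = 0
 *               if pages_tbl[0] is not FREE (or has another order), the loop exits,
 *               and the merged block [8..15] is registered in free_pages_root[3].
 */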
[1]208////////////////////////////////////////////
209page_t * ppm_alloc_pages( uint32_t   order )
210{
[632]211        page_t   * current_block;
[160]212        uint32_t   current_order;
[1]213        uint32_t   current_size;
[632]214        page_t   * found_block; 
[551]215
[635]216    thread_t * this = CURRENT_THREAD;
217
[656]218        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
219
[438]220#if DEBUG_PPM_ALLOC_PAGES
[433]221uint32_t cycle = (uint32_t)hal_get_cycles();
222#endif
[1]223
[656]224#if DEBUG_PPM_ALLOC_PAGES
[438]225if( DEBUG_PPM_ALLOC_PAGES < cycle )
[636]226{
227    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
228    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
[656]229    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
[636]230}
[433]231#endif
232
[611]233// check order
234assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
[1]235
[632]236    // build extended pointer on lock protecting the local PPM free_lists
237    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
[1]238
[160]239        // take lock protecting free lists
[632]240        remote_busylock_acquire( lock_xp );
[1]241
[632]242        current_block = NULL;
[635]243    current_order = order;
[632]244
[635]245        // search a free block of size equal to or larger than the requested size
246        while( current_order < CONFIG_PPM_MAX_ORDER )
[1]247        {
[635]248        // get local pointer on the root of relevant free_list (same in all clusters)
249        list_entry_t * root = &ppm->free_pages_root[current_order];
250
251                if( !list_is_empty( root ) )
[1]252                {
[632]253            // get first free block in this free_list
[635]254                        current_block = LIST_FIRST( root , page_t , list );
[632]255
256            // remove this block from this free_list
257                        list_unlink( &current_block->list );
[635]258                ppm->free_pages_nr[current_order] --;
[632]259
260            // register pointer on found block
261            found_block = current_block;
262
263            // compute found block size
264                current_size = (1 << current_order);
265
266                        break; 
[1]267                }
[635]268
269        // increment loop index
270        current_order++;
[1]271        }
272
[632]273        if( current_block == NULL ) // return failure if no free block found
[1]274        {
[160]275                // release lock protecting free lists
[632]276                remote_busylock_release( lock_xp );
[1]277
[635]278        printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
279        __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
[433]280
[160]281                return NULL;
282        }
[18]283
[1]284
[632]285        // split the found block in smaller sub-blocks if required
[160]286        // and update the free-lists accordingly
[1]287        while( current_order > order )
288        {
[635]289        // update size and order
[1]290                current_order --;
[635]291                current_size >>= 1;
[632]292
[651]293        // update order fields in new free block
[632]294                current_block = found_block + current_size;
295                current_block->order = current_order;
[18]296
[632]297        // insert new free block in relevant free_list
298                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
[1]299                ppm->free_pages_nr[current_order] ++;
300        }
[18]301
[632]302        // update found block page descriptor
303        page_clear_flag( found_block , PG_FREE );
304        page_refcount_up( found_block );
305        found_block->order = order;
[1]306
[160]307        // release lock protecting free lists
[632]308        remote_busylock_release( lock_xp );
[18]309
[585]310    // update DQDT
[632]311    dqdt_increment_pages( local_cxy , order );
[585]312
[656]313    hal_fence();
314
[438]315#if DEBUG_PPM_ALLOC_PAGES
316if( DEBUG_PPM_ALLOC_PAGES < cycle )
[636]317{
318    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
319    __FUNCTION__, this->process->pid, this->trdid, 
320    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
[656]321    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
[636]322}
[433]323#endif
[7]324
[632]325        return found_block;
[1]326
[433]327}  // end ppm_alloc_pages()
[1]328
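/* Illustrative example : a worked case of the split loop above (illustrative values).
 * Requesting order == 1 when the smallest free block found has order == 3
 * (found_block at page index 0, current_size == 8) :
 *
 *     current_order = 2 : current_size = 4 ; pages_tbl[4] becomes a free order-2 block
 *     current_order = 1 : current_size = 2 ; pages_tbl[2] becomes a free order-1 block
 *     the loop stops (current_order == order), and pages [0..1] are returned,
 *     with PG_FREE cleared, refcount incremented, and order field set to 1.
 */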
329////////////////////////////////////
330void ppm_free_pages( page_t * page )
331{
332        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[18]333
[438]334#if DEBUG_PPM_FREE_PAGES
[632]335thread_t * this  = CURRENT_THREAD;
336uint32_t   cycle = (uint32_t)hal_get_cycles();
[433]337#endif
338
[656]339#if DEBUG_PPM_FREE_PAGES
[438]340if( DEBUG_PPM_FREE_PAGES < cycle )
[636]341{
342    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
343    __FUNCTION__, this->process->pid, this->trdid, 
344    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
[656]345    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
346}
[433]347#endif
348
[632]349    //build extended pointer on lock protecting free_lists
350    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
351
[160]352        // get lock protecting free_pages[] array
[632]353        remote_busylock_acquire( lock_xp );
[1]354
[18]355        ppm_free_pages_nolock( page );
[1]356
[632]357        // release lock protecting free_lists
358        remote_busylock_release( lock_xp );
[433]359
[585]360    // update DQDT
[632]361    dqdt_decrement_pages( local_cxy , page->order );
[585]362
[656]363    hal_fence();
364
[438]365#if DEBUG_PPM_FREE_PAGES
366if( DEBUG_PPM_FREE_PAGES < cycle )
[636]367{
368    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
369    __FUNCTION__, this->process->pid, this->trdid, 
370    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
[656]371    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
[636]372}
[433]373#endif
374
[567]375}  // end ppm_free_pages()
[1]376
[636]377
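/* Illustrative example : a minimal usage sketch of the local allocator, assuming the
 * caller runs in the target cluster and handles the NULL return on failure :
 *
 *     page_t * page = ppm_alloc_pages( 2 );             // 4 contiguous physical pages
 *     if( page != NULL )
 *     {
 *         xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
 *         ppn_t  ppn     = ppm_page2ppn(  XPTR( local_cxy , page ) );
 *         // ... use the four pages identified by base_xp / ppn ...
 *         ppm_free_pages( page );                       // return them to the free lists
 *     }
 */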
[632]378/////////////////////////////////////////////
[656]379xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
[632]380                               uint32_t  order )
[1]381{
[632]382        uint32_t   current_order;
383        uint32_t   current_size;
384    page_t   * current_block;   
385    page_t   * found_block;
386
[635]387    thread_t * this  = CURRENT_THREAD;
388
[656]389// check order
390assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
391
392    // get local pointer on PPM (same in all clusters)
393        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
394
[634]395#if DEBUG_PPM_REMOTE_ALLOC_PAGES
[632]396uint32_t   cycle = (uint32_t)hal_get_cycles();
397#endif
398
[656]399#if DEBUG_PPM_REMOTE_ALLOC_PAGES
[634]400if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[636]401{
[656]402    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
[636]403    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
[656]404    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
[636]405}
[632]406#endif
407
408    //build extended pointer on lock protecting remote PPM
409    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
410
411        // take lock protecting free lists in remote cluster
412        remote_busylock_acquire( lock_xp );
413
414    current_block = NULL;   
[635]415    current_order = order;
[632]416
[635]417    // search a free block of size equal to or larger than the requested size
418    while( current_order < CONFIG_PPM_MAX_ORDER )
419    {
420        // get local pointer on the root of relevant free_list (same in all clusters)
[632]421        list_entry_t * root = &ppm->free_pages_root[current_order];
422
[635]423                if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
[632]424                {
425            // get local pointer on first free page descriptor in remote cluster
426                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
427
428            // remove first free page from the free-list in remote cluster
429                        list_remote_unlink( cxy , &current_block->list );
[635]430                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
[632]431
432            // register found block
433            found_block = current_block;
434
435            // compute found block size
436                current_size = (1 << current_order);
437
438                        break;
439                }
[635]440
441        // increment loop index
442        current_order++;
[632]443        }
444
445        if( current_block == NULL ) // return failure
446        {
447                // release lock protecting free lists
448                remote_busylock_release( lock_xp );
449
[635]450        printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
451        __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
[632]452
453                return XPTR_NULL;
454        }
455
456        // split the found block in smaller sub-blocks if required
457        // and update the free-lists accordingly in remote cluster
458        while( current_order > order )
459        {
[635]460        // update order and size
[632]461                current_order --;
462                current_size >>= 1;
463
464        // update new free block order field in remote cluster
[635]465                current_block = found_block + current_size;
[632]466                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
467
468        // get local pointer on the root of the relevant free_list in remote cluster 
469        list_entry_t * root = &ppm->free_pages_root[current_order];
470
471        // insert new free block in this free_list
472                list_remote_add_first( cxy , root, &current_block->list );
473
474        // update free-list number of items in remote cluster
475        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
476        }
477
[634]478        // update refcount, flags and order fields in found block
[632]479        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
480        page_remote_refcount_up( XPTR( cxy , found_block ) );
481        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
482   
483        // release lock protecting free lists in remote cluster
484        remote_busylock_release( lock_xp );
485
486    // update DQDT page counter in remote cluster
487    dqdt_increment_pages( cxy , order );
488
[656]489    hal_fence();
490
[634]491#if DEBUG_PPM_REMOTE_ALLOC_PAGES
492if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[636]493{
494    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
495    __FUNCTION__, this->process->pid, this->trdid, 
496    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
[656]497    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
[636]498}
[632]499#endif
500
[656]501        return XPTR( cxy , found_block );
[632]502
503}  // end ppm_remote_alloc_pages()
504
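/* Illustrative example : a minimal usage sketch of the remote allocator, where cxy is
 * the identifier of the target cluster (chosen by the caller, e.g. from the DQDT) :
 *
 *     xptr_t page_xp = ppm_remote_alloc_pages( cxy , 0 );    // one page in cluster cxy
 *     if( page_xp != XPTR_NULL )
 *     {
 *         ppn_t  ppn     = ppm_page2ppn( page_xp );
 *         xptr_t base_xp = ppm_page2base( page_xp );
 *         // ... the page can be accessed with the hal_remote_* primitives ...
 *     }
 */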
[636]505///////////////////////////////////////////////
506void ppm_remote_free_pages( cxy_t     page_cxy,
507                            page_t  * page_ptr )
[632]508{
509    xptr_t     page_xp;          // extended pointer on released page descriptor
[636]510        page_t   * buddy_ptr;        // searched buddy page descriptor
511    uint32_t   buddy_order;      // searched buddy page order
512        uint32_t   buddy_index;      // buddy page index in page_tbl[]
513        page_t   * current_ptr;      // current (merged) page descriptor
514        uint32_t   current_index;    // current (merged) page index in page_tbl[]
515        uint32_t   current_order;    // current (merged) page order
[632]516
[656]517    // get local pointer on PPM (same in all clusters)
518        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
519
520    // get page order from remote page descriptor
521    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
522
[634]523#if DEBUG_PPM_REMOTE_FREE_PAGES
[632]524thread_t * this  = CURRENT_THREAD;
525uint32_t   cycle = (uint32_t)hal_get_cycles();
[656]526ppn_t      ppn   = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
[632]527#endif
528
[656]529#if DEBUG_PPM_REMOTE_FREE_PAGES
[634]530if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[636]531{
532    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
[656]533    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
534    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
[636]535}
[632]536#endif
537
538    // build extended pointer on released page descriptor
[636]539    page_xp = XPTR( page_cxy , page_ptr );
[632]540   
541    // build extended pointer on lock protecting remote PPM
[636]542    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
[632]543
544    // get local pointer on remote PPM page_tbl[] array
[636]545        page_t * pages_tbl = hal_remote_lpt( XPTR( page_cxy , &ppm->pages_tbl ) );
[632]546
547        // get lock protecting free_pages in remote cluster
548        remote_busylock_acquire( lock_xp );
549
550assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
[636]551"page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
[632]552
553assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
[636]554"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
[632]555
[636]556        // set the FREE flag in released page descriptor
[632]557        page_remote_set_flag( page_xp , PG_FREE );
558
[636]559    // initialise loop variables
560    current_ptr   = page_ptr;
[656]561    current_order = order;
[636]562        current_index = page_ptr - pages_tbl;
563
[632]564        // search the buddy page descriptor
[636]565        // - merge with current page descriptor if buddy found
566        // - exit to release the current page descriptor if buddy not found
567    while( current_order < CONFIG_PPM_MAX_ORDER )
568    {
569        // compute buddy page index and local pointer on page descriptor
[632]570                buddy_index = current_index ^ (1 << current_order);
571                buddy_ptr   = pages_tbl + buddy_index;
[637]572
573        // get buddy order
574        buddy_order = hal_remote_l32( XPTR( page_cxy , &buddy_ptr->order ) );
[636]575       
576        // exit loop if buddy not found
577                if( !page_remote_is_flag( XPTR( page_cxy , buddy_ptr ) , PG_FREE ) || 
[632]578            (buddy_order != current_order) ) break;
579
[636]580        // remove buddy page from its free list in remote cluster
581                list_remote_unlink( page_cxy , &buddy_ptr->list );
582        hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , -1 );
[632]583
[636]584        // reset order field in buddy page descriptor
585        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );
[632]586
[656]587                // compute next values for loop variables
[632]588                current_index &= buddy_index;
[636]589        current_order++;
590        current_ptr = pages_tbl + current_index; 
591
592    }  // end loop on order
593
594        // update current (merged) page descriptor order field
595    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );
[632]596
[636]597        // insert current (merged) page into relevant free list
[656]598        list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], &current_ptr->list );
[636]599    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
[632]600
601        // release lock protecting free_pages[] array
602        remote_busylock_release( lock_xp );
603
604    // update DQDT
[636]605    dqdt_decrement_pages( page_cxy , order );
[632]606
[656]607    hal_fence();
608
[634]609#if DEBUG_PPM_REMOTE_FREE_PAGES
610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[636]611{
612    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
[656]613    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
614    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
[636]615}
[632]616#endif
617
618}  // end ppm_remote_free_pages()
619
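/* Illustrative example : the matching release path, assuming the caller kept the
 * extended pointer returned by ppm_remote_alloc_pages() :
 *
 *     cxy_t    page_cxy = GET_CXY( page_xp );
 *     page_t * page_ptr = GET_PTR( page_xp );
 *     ppm_remote_free_pages( page_cxy , page_ptr );
 */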
620////////////////////////////////////
621void ppm_remote_display( cxy_t cxy )
622{
[1]623        uint32_t       order;
624        list_entry_t * iter;
[634]625    xptr_t         page_xp;
[1]626
[433]627    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
628
[636]629    // get remote PPM general parameters
630    uint32_t   pages_nr   = hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) );
631    void     * vaddr_base = hal_remote_lpt( XPTR( cxy , &ppm->vaddr_base ) ); 
632    void     * pages_tbl  = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) ); 
633
[632]634    // build extended pointer on lock protecting remote PPM
[634]635    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
[1]636
[634]637    // get pointers on TXT0 chdev
638    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
639    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
640    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[632]641
[634]642    // build extended pointer on remote TXT0 lock
643    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[18]644
[634]645        // get PPM lock
646        remote_busylock_acquire( ppm_lock_xp );
647
648    // get TXT0 lock
649    remote_busylock_acquire( txt_lock_xp );
650
[636]651        nolock_printk("\n***** PPM in cluster %x / %d pages / page_tbl %x / vaddr_base %x\n",
652    cxy, pages_nr, pages_tbl, vaddr_base );
[634]653
[1]654        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
655        {
[632]656        // get number of free pages for free_list[order] in remote cluster
657        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
[18]658
[656]659        // display forward free_list[order]
660                nolock_printk("- forward  : order = %d / n = %d : ", order , n );
[632]661                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
[1]662                {
[634]663            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
664                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
[1]665                }
[634]666                nolock_printk("\n");
[656]667
668        // display backward free_list[order]
669                nolock_printk("- backward : order = %d / n = %d : ", order , n );
670                LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter )
671                {
672            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
673                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
674                }
675                nolock_printk("\n");
[1]676        }
677
[634]678        // release TXT0 lock
679        remote_busylock_release( txt_lock_xp );
680
681        // release PPM lock
682        remote_busylock_release( ppm_lock_xp );
[160]683}
[1]684
[632]685////////////////////////////////
686error_t ppm_assert_order( void )
[1]687{
688        uint32_t       order;
689        list_entry_t * iter;
690        page_t       * page;
[18]691
[632]692    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
693
[407]694        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
[1]695        {
696                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
[18]697
[1]698                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
699                {
700                        page = LIST_ELEMENT( iter , page_t , list );
[160]701                        if( page->order != order )  return -1;
[1]702                }
703        }
704
[160]705        return 0;
706}
[53]707
[567]708
709//////////////////////////////////////////////////////////////////////////////////////
710//     functions to handle  dirty physical pages
711//////////////////////////////////////////////////////////////////////////////////////
712
[606]713//////////////////////////////////////////
714bool_t ppm_page_do_dirty( xptr_t page_xp )
[567]715{
716        bool_t done = false;
717
[606]718    // get page cluster and local pointer
719    page_t * page_ptr = GET_PTR( page_xp );
720    cxy_t    page_cxy = GET_CXY( page_xp );
721
722    // get local pointer on PPM (same in all clusters)
[567]723        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
724
[606]725    // build extended pointers on page lock, page flags, and PPM dirty list lock
726    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
727    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
728    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
729           
730        // lock the remote PPM dirty_list
731        remote_queuelock_acquire( dirty_lock_xp );
[567]732
[606]733    // lock the remote page
734    remote_busylock_acquire( page_lock_xp );
735
736    // get remote page flags
737    uint32_t flags = hal_remote_l32( page_flags_xp );
738
739        if( (flags & PG_DIRTY) == 0 )
[567]740        {
741                // set dirty flag in page descriptor
[606]742        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
[567]743
[632]744                // insert the page in the remote dirty list
745        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
[606]746
[567]747                done = true;
748        }
749
[606]750    // unlock the remote page
751    remote_busylock_release( page_lock_xp );
[567]752
[606]753        // unlock the remote PPM dirty_list
754        remote_queuelock_release( dirty_lock_xp );
755
[567]756        return done;
757
[606]758} // end ppm_page_do_dirty()
759
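/* Illustrative example : a minimal sketch of the expected use of this function,
 * assuming page_xp identifies a mapper page that has just been modified :
 *
 *     if( ppm_page_do_dirty( page_xp ) )
 *     {
 *         // first write since the last sync : the page is now registered
 *         // in the dirty list of the cluster containing the page
 *     }
 */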
760////////////////////////////////////////////
761bool_t ppm_page_undo_dirty( xptr_t page_xp )
[567]762{
763        bool_t done = false;
764
[606]765    // get page cluster and local pointer
766    page_t * page_ptr = GET_PTR( page_xp );
767    cxy_t    page_cxy = GET_CXY( page_xp );
768
769    // get local pointer on PPM (same in all clusters)
[567]770        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
771
[606]772    // build extended pointers on page lock, page flags, and PPM dirty list lock
773    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
774    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
775    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
776           
777        // lock the remote PPM dirty_list
778        remote_queuelock_acquire( dirty_lock_xp );
[567]779
[606]780    // lock the remote page
781    remote_busylock_acquire( page_lock_xp );
782
783    // get remote page flags
784    uint32_t flags = hal_remote_l32( page_flags_xp );
785
786        if( (flags & PG_DIRTY) )  // page is dirty
[567]787        {
[606]788                // reset dirty flag in page descriptor
789        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
[567]790
[632]791        // remove the page from remote dirty list
792        list_remote_unlink( page_cxy , &page_ptr->list );
[606]793
[567]794                done = true;
795        }
796
[606]797    // unlock the remote page
798    remote_busylock_release( page_lock_xp );
[567]799
[606]800        // unlock the remote PPM dirty_list
801        remote_queuelock_release( dirty_lock_xp );
802
[567]803        return done;
804
[606]805}  // end ppm_page_undo_dirty()
806
807/////////////////////////////////
808void ppm_sync_dirty_pages( void )
[567]809{
[606]810        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[567]811
[606]812    // get local pointer on PPM dirty_root
813    list_entry_t * dirty_root = &ppm->dirty_root;
814
815    // build extended pointer on PPM dirty_lock
816    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
817
[567]818        // get the PPM dirty_list lock
[606]819        remote_queuelock_acquire( dirty_lock_xp );
[567]820
821        while( !list_is_empty( &ppm->dirty_root ) )
822        {
[606]823                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
824        xptr_t   page_xp = XPTR( local_cxy , page );
[567]825
[606]826        // build extended pointer on page lock
827        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
828
[567]829                // get the page lock
[606]830                remote_busylock_acquire( page_lock_xp );
[567]831
832                // sync the page
[606]833                vfs_fs_move_page( page_xp , false );  // from mapper to device
[567]834
835                // release the page lock
[606]836                remote_busylock_release( page_lock_xp );
[567]837        }
838
839        // release the PPM dirty_list lock
[606]840        remote_queuelock_release( dirty_lock_xp );
[567]841
[606]842}  // end ppm_sync_dirty_pages()
843
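/* Illustrative example : a sketch of the overall dirty pages lifecycle implemented
 * above, assuming the sync is triggered by the VFS (e.g. on sync or unmount) :
 *
 *     ppm_page_do_dirty( page_xp );     // a writer marks the page after updating it
 *
 *     ppm_sync_dirty_pages();           // later : every page in the local dirty list
 *                                       // is copied to the device by
 *                                       // vfs_fs_move_page( page_xp , false )
 */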