source: trunk/kernel/mm/ppm.c @ 632

Last change on this file since 632 was 632, checked in by alain, 5 years ago

This version replaces the RPC by direct remote memory access
for physical pages allocation/release.
It is committed before being tested.

File size: 24.3 KB
1/*
2 * ppm.c - Per-cluster Physical Pages Manager implementation
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Alain Greiner    (2016,2017,2018,2019)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH.is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH.is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
32#include <dqdt.h>
33#include <busylock.h>
34#include <queuelock.h>
35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
39#include <mapper.h>
40#include <ppm.h>
41#include <vfs.h>
42
43////////////////////////////////////////////////////////////////////////////////////////
44//     functions to  translate [ page <-> base <-> ppn ]
45////////////////////////////////////////////////////////////////////////////////////////
46
47
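// The three views of a physical page are linked by simple arithmetic : the page
// descriptor at index <i> in the local pages_tbl[] array describes the page whose
// kernel virtual base address is ( vaddr_base + (i << CONFIG_PPM_PAGE_SHIFT) ),
// and whose PPN is the physical address PADDR( cxy , i << CONFIG_PPM_PAGE_SHIFT )
// shifted right by CONFIG_PPM_PAGE_SHIFT.
// Example (assuming 4 Kbytes pages, i.e. CONFIG_PPM_PAGE_SHIFT == 12) :
// pages_tbl[3] describes the page starting at ( vaddr_base + 0x3000 ).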
48/////////////////////////////////////////////
49inline xptr_t ppm_page2base( xptr_t page_xp )
50{
51        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
52
53    cxy_t    page_cxy = GET_CXY( page_xp );
54    page_t * page_ptr = GET_PTR( page_xp );
55
56   void   * base_ptr = ppm->vaddr_base + 
57                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
58
59        return XPTR( page_cxy , base_ptr );
60
61} // end ppm_page2base()
62
63/////////////////////////////////////////////
64inline xptr_t ppm_base2page( xptr_t base_xp )
65{
66        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
67
68    cxy_t    base_cxy = GET_CXY( base_xp );
69    void   * base_ptr = GET_PTR( base_xp );
70
71        page_t * page_ptr = ppm->pages_tbl + 
72                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
73
74        return XPTR( base_cxy , page_ptr );
75
76}  // end ppm_base2page()
77
78
79
80///////////////////////////////////////////
81inline ppn_t ppm_page2ppn( xptr_t page_xp )
82{
83        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
84
85    cxy_t    page_cxy = GET_CXY( page_xp );
86    page_t * page_ptr = GET_PTR( page_xp );
87
88    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
89
90    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
91
92}  // end ppm_page2ppn()
93
94///////////////////////////////////////
95inline xptr_t ppm_ppn2page( ppn_t ppn )
96{
97        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
98
99    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
100
101    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
102    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
103
104    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
105
106}  // end ppm_ppn2page()
107
108
109
110///////////////////////////////////////
111inline xptr_t ppm_ppn2base( ppn_t ppn )
112{
113        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
114   
115    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
116
117    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
118    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
119
120        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
121
122}  // end ppm_ppn2base()
123
124///////////////////////////////////////////
125inline ppn_t ppm_base2ppn( xptr_t base_xp )
126{
127        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
128
129    cxy_t    base_cxy = GET_CXY( base_xp );
130    void   * base_ptr = GET_PTR( base_xp );
131
132    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
133
134    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
135
136}  // end ppm_base2ppn()
137
138
139////////////////////////////////////////////////////////////////////////////////////////
140//     functions to  allocate / release  physical pages
141////////////////////////////////////////////////////////////////////////////////////////
142
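//
// The allocator implements a binary buddy scheme : free_pages_root[k] is the root
// of the list of free blocks of (1 << k) contiguous pages, free_pages_nr[k] is the
// number of blocks in that list, and both arrays are protected by the free_lock
// busylock of the owning cluster.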
143///////////////////////////////////////////
144void ppm_free_pages_nolock( page_t * page )
145{
146        page_t   * buddy;            // searched buddy block page descriptor
147        uint32_t   buddy_index;      // buddy block index in page_tbl[]
148        page_t   * current;          // current (merged) block page descriptor
149        uint32_t   current_index;    // current (merged) block index in page_tbl[]
150        uint32_t   current_order;    // current (merged) block order
151
152        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
153        page_t   * pages_tbl   = ppm->pages_tbl;
154
155assert( !page_is_flag( page , PG_FREE ) ,
156"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
157
158assert( !page_is_flag( page , PG_RESERVED ) ,
159"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
160
161        // update released page descriptor flags
162        page_set_flag( page , PG_FREE );
163
164        // search the buddy page descriptor
165        // - merge with current page descriptor if found
166        // - exit to release the current page descriptor if not found
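        //
        // For a block of order k starting at index i (i being a multiple of 2^k),
        // the buddy is the block of the same order at index (i ^ (1 << k)), and the
        // merged block of order (k+1) starts at index (i & (i ^ (1 << k))).
        // Example : releasing the order 0 page at index 13 first tries to merge
        // with its buddy at index 12, producing an order 1 block at index 12.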
167        current       = page;
168        current_index = (uint32_t)(page - ppm->pages_tbl);
169        for( current_order = page->order ;
170             current_order < CONFIG_PPM_MAX_ORDER ;
171             current_order++ )
172        {
173                buddy_index = current_index ^ (1 << current_order);
174                buddy       = pages_tbl + buddy_index;
175
176        // exit this loop if buddy block not found
177                if( !page_is_flag( buddy , PG_FREE ) || 
178            (buddy->order != current_order) ) break;
179
180                // remove buddy block from free_list
181                list_unlink( &buddy->list );
182                ppm->free_pages_nr[current_order] --;
183
184        // reset order field in buddy block page descriptor
185                buddy->order = 0;
186
187                // compute merged block index in page_tbl[]
188                current_index &= buddy_index;
189        }
190
191        // update pointer and order field for merged block page descriptor
192        current        = pages_tbl + current_index;
193        current->order = current_order;
194
195        // insert merged block in free list
196        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
197        ppm->free_pages_nr[current_order] ++;
198
199}  // end ppm_free_pages_nolock()
200
201////////////////////////////////////////////
202page_t * ppm_alloc_pages( uint32_t   order )
203{
204        page_t   * current_block;
205        uint32_t   current_order;
206        uint32_t   current_size;
207        page_t   * found_block; 
208
209#if DEBUG_PPM_ALLOC_PAGES
210thread_t * this = CURRENT_THREAD;
211uint32_t cycle = (uint32_t)hal_get_cycles();
212if( DEBUG_PPM_ALLOC_PAGES < cycle )
213printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
214__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
215#endif
216
217#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
218if( DEBUG_PPM_ALLOC_PAGES < cycle )
219ppm_remote_display( local_cxy );
220#endif
221
222        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
223
224// check order
225assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
226
227    // build extended pointer on lock protecting the local PPM free lists
228    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
229
230        // take lock protecting free lists
231        remote_busylock_acquire( lock_xp );
232
233        current_block = NULL;
234
235        // find a free block equal or larger to requested size
236        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
237        {
238                if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
239                {
240            // get first free block in this free_list
241                        current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
242
243            // remove this block from this free_list
244                        list_unlink( &current_block->list );
245
246            // register pointer on found block
247            found_block = current_block;
248
249            // update this free-list number of blocks
250                ppm->free_pages_nr[current_order] --;
251
252            // compute found block size
253                current_size = (1 << current_order);
254
255                        break; 
256                }
257        }
258
259        if( current_block == NULL ) // return failure if no free block found
260        {
261                // release lock protecting free lists
262                remote_busylock_release( lock_xp );
263
264#if DEBUG_PPM_ALLOC_PAGES
265cycle = (uint32_t)hal_get_cycles();
266if( DEBUG_PPM_ALLOC_PAGES < cycle )
267printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
268__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
269#endif
270
271                return NULL;
272        }
273
274
275        // split the found block in smaller sub-blocks if required
276        // and update the free-lists accordingly
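        //
        // At each iteration the upper half of the current block is returned to the
        // free list of the next smaller order, and the lower half is kept.
        // Example : a request of order 1 served by a free block of order 3 starting
        // at index 40 returns the order 2 block [44..47] and the order 1 block
        // [42..43] to their free lists, and allocates pages [40..41].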
277        while( current_order > order )
278        {
279                current_order --;
280
281        // update pointer, size, and order fields for new free block
282                current_size >>= 1;
283                current_block = found_block + current_size;
284                current_block->order = current_order;
285
286        // insert new free block in relevant free_list
287                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
288
289        // update number of blocks in free list
290                ppm->free_pages_nr[current_order] ++;
291        }
292
293        // update found block page descriptor
294        page_clear_flag( found_block , PG_FREE );
295        page_refcount_up( found_block );
296        found_block->order = order;
297
298        // release lock protecting free lists
299        remote_busylock_release( lock_xp );
300
301    // update DQDT
302    dqdt_increment_pages( local_cxy , order );
303
304#if DEBUG_PPM_ALLOC_PAGES
305cycle = (uint32_t)hal_get_cycles();
306if( DEBUG_PPM_ALLOC_PAGES < cycle )
307printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
308__FUNCTION__, this->process->pid, this->trdid, 
3091<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
310#endif
311
312#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
313if( DEBUG_PPM_ALLOC_PAGES < cycle )
314ppm_remote_display( local_cxy );
315#endif
316
317        return found_block;
318
319}  // end ppm_alloc_pages()
320
321
322////////////////////////////////////
323void ppm_free_pages( page_t * page )
324{
325        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
326
327#if DEBUG_PPM_FREE_PAGES
328thread_t * this  = CURRENT_THREAD;
329uint32_t   cycle = (uint32_t)hal_get_cycles();
330if( DEBUG_PPM_FREE_PAGES < cycle )
331printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
332__FUNCTION__, this->process->pid, this->trdid, 
3331<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
334#endif
335
336#if(DEBUG_PPM_FREE_PAGES & 0x1)
337if( DEBUG_PPM_FREE_PAGES < cycle )
338ppm_remote_display( local_cxy );
339#endif
340
341    //build extended pointer on lock protecting free_lists
342    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
343
344        // get lock protecting free_pages[] array
345        remote_busylock_acquire( lock_xp );
346
347        ppm_free_pages_nolock( page );
348
349        // release lock protecting free_lists
350        remote_busylock_release( lock_xp );
351
352    // update DQDT
353    dqdt_decrement_pages( local_cxy , page->order );
354
355#if DEBUG_PPM_FREE_PAGES
356cycle = (uint32_t)hal_get_cycles();
357if( DEBUG_PPM_FREE_PAGES < cycle )
358printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
359__FUNCTION__, this->process->pid, this->trdid, 
3601<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
361#endif
362
363#if(DEBUG_PPM_FREE_PAGES & 0x1)
364if( DEBUG_PPM_FREE_PAGES < cycle )
365ppm_remote_display( local_cxy );
366#endif
367
368}  // end ppm_free_pages()
369
370/////////////////////////////////////////////
371xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
372                               uint32_t  order )
373{
374        uint32_t   current_order;
375        uint32_t   current_size;
376    page_t   * current_block;   
377    page_t   * found_block;
378
379#if DEBUG_PPM_ALLOC_PAGES
380thread_t * this  = CURRENT_THREAD;
381uint32_t   cycle = (uint32_t)hal_get_cycles();
382if( DEBUG_PPM_ALLOC_PAGES < cycle )
383printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
384__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
385#endif
386
387#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
388if( DEBUG_PPM_ALLOC_PAGES < cycle )
389ppm_remote_display( cxy );
390#endif
391
392// check order
393assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
394
395    // get local pointer on PPM (same in all clusters)
396        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
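    //
    // As the PPM descriptor has the same local address in all clusters, the local
    // pointers computed below (free list roots, page descriptors) are also valid
    // in the target cluster <cxy> : they are only dereferenced through the remote
    // access primitives ( hal_remote_*() / list_remote_*() ), using extended
    // pointers of the form XPTR( cxy , local_pointer ).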
397
398    //build extended pointer on lock protecting remote PPM
399    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
400
401        // take lock protecting free lists in remote cluster
402        remote_busylock_acquire( lock_xp );
403
404    current_block = NULL;   
405
406        // find in remote cluster a free block equal or larger to requested size
407        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
408        {
409        // get local pointer on the root of relevant free_list in remote cluster
410        list_entry_t * root = &ppm->free_pages_root[current_order];
411
412                if( !list_remote_is_empty( cxy , root ) )
413                {
414            // get local pointer on first free page descriptor in remote cluster
415                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
416
417            // remove first free page from the free-list in remote cluster
418                        list_remote_unlink( cxy , &current_block->list );
419
420            // register found block
421            found_block = current_block;
422
423                // decrement relevant free-list number of items in remote cluster
424                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
425
426            // compute found block size
427                current_size = (1 << current_order);
428
429                        break;
430                }
431        }
432
433        if( current_block == NULL ) // return failure
434        {
435                // release lock protecting free lists
436                remote_busylock_release( lock_xp );
437
438#if DEBUG_PPM_ALLOC_PAGES
439cycle = (uint32_t)hal_get_cycles();
440if( DEBUG_PPM_ALLOC_PAGES < cycle )
441printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
442__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
443#endif
444
445                return XPTR_NULL;
446        }
447
448        // split the found block in smaller sub-blocks if required
449        // and update the free-lists accordingly in remote cluster
450        while( current_order > order )
451        {
452        // update order, size, and local pointer for new free block
453                current_order --;
454                current_size >>= 1;
455                current_block = found_block + current_size;
456
457        // update new free block order field in remote cluster
458                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
459
460        // get local pointer on the root of the relevant free_list in remote cluster 
461        list_entry_t * root = &ppm->free_pages_root[current_order];
462
463        // insert new free block in this free_list
464                list_remote_add_first( cxy , root, &current_block->list );
465
466        // update free-list number of items in remote cluster
467        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
468        }
469
470        // update refcount, flags and order fields in found block remote page descriptor
471        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
472        page_remote_refcount_up( XPTR( cxy , found_block ) );
473        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
474   
475        // release lock protecting free lists in remote cluster
476        remote_busylock_release( lock_xp );
477
478    // update DQDT page counter in remote cluster
479    dqdt_increment_pages( cxy , order );
480
481#if DEBUG_PPM_ALLOC_PAGES
482cycle = (uint32_t)hal_get_cycles();
483if( DEBUG_PPM_ALLOC_PAGES < cycle )
484printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
485__FUNCTION__, this->process->pid, this->trdid, 
4861<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
487#endif
488
489#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
490if( DEBUG_PPM_ALLOC_PAGES < cycle )
491ppm_remote_display( cxy );
492#endif
493
494        return XPTR( cxy , found_block );
495
496}  // end ppm_remote_alloc_pages()
497
498//////////////////////////////////////////
499void ppm_remote_free_pages( cxy_t     cxy,
500                            page_t  * page )
501{
502    xptr_t     page_xp;          // extended pointer on released page descriptor
503    uint32_t   order;            // released block order
504        page_t   * buddy_ptr;        // searched buddy block page descriptor
505    uint32_t   buddy_order;      // searched buddy block order
506        uint32_t   buddy_index;      // buddy block index in page_tbl[]
507        page_t   * current_ptr;      // current (merged) block page descriptor
508        uint32_t   current_index;    // current (merged) block index in page_tbl[]
509        uint32_t   current_order;    // current (merged) block order
510
511#if DEBUG_PPM_FREE_PAGES
512thread_t * this  = CURRENT_THREAD;
513uint32_t   cycle = (uint32_t)hal_get_cycles();
514if( DEBUG_PPM_FREE_PAGES < cycle )
515printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
516__FUNCTION__, this->process->pid, this->trdid, 
5171<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
518#endif
519
520#if(DEBUG_PPM_FREE_PAGES & 0x1)
521if( DEBUG_PPM_FREE_PAGES < cycle )
522ppm_remote_display( cxy );
523#endif
524
525    // build extended pointer on released page descriptor
526    page_xp = XPTR( cxy , page );
527   
528    // get released page order
529    order = hal_remote_l32( XPTR( cxy , &page->order ) );
530
531    // get local pointer on PPM (same in all clusters)
532        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
533
534    // build extended pointer on lock protecting remote PPM
535    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
536
537    // get local pointer on remote PPM page_tbl[] array
538        page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );
539
540        // get lock protecting free_pages in remote cluster
541        remote_busylock_acquire( lock_xp );
542
543assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
544"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
545
546assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
547"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
548
549        // update released page descriptor flags
550        page_remote_set_flag( page_xp , PG_FREE );
551
552        // search the buddy page descriptor
553        // - merge with current page descriptor if found
554        // - exit to release the current page descriptor if not found
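        //
        // This is the same buddy coalescing algorithm as in ppm_free_pages_nolock(),
        // but all accesses to the page_tbl[] entries and free lists of the remote
        // cluster go through the remote access primitives.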
555        current_ptr   = page;
556        current_index = (uint32_t)(page - pages_tbl);
557        for( current_order = order ;
558             current_order < CONFIG_PPM_MAX_ORDER ;
559             current_order++ )
560        {
561                buddy_index = current_index ^ (1 << current_order);
562                buddy_ptr   = pages_tbl + buddy_index;
563
564        // get buddy block order
565        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );
566
567        // exit loop if buddy block not found
568                if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) || 
569            (buddy_order != current_order) ) break;
570
571                // remove buddy from free list in remote cluster
572                list_remote_unlink( cxy , &buddy_ptr->list );
573        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );
574
575        // reset order field in buddy block page descriptor
576        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );
577
578                // compute merged block index in page_tbl[] array
579                current_index &= buddy_index;
580        }
581
582        // update merged page descriptor order field
583        current_ptr = pages_tbl + current_index;
584    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );
585
586        // insert merged block into relevant free list in remote cluster
587        list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
588    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );
589
590        // release lock protecting free_pages[] array
591        remote_busylock_release( lock_xp );
592
593    // update DQDT
594    dqdt_decrement_pages( cxy , order );
595
596#if DEBUG_PPM_FREE_PAGES
597cycle = (uint32_t)hal_get_cycles();
598if( DEBUG_PPM_FREE_PAGES < cycle )
599printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
600__FUNCTION__, this->process->pid, this->trdid, 
6011<<order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
602#endif
603
604#if(DEBUG_PPM_FREE_PAGES & 0x1)
605if( DEBUG_PPM_FREE_PAGES < cycle )
606ppm_remote_display( cxy );
607#endif
608
609}  // end ppm_remote_free_pages()
610
611////////////////////////////////////
612void ppm_remote_display( cxy_t cxy )
613{
614        uint32_t       order;
615        list_entry_t * iter;
616        page_t       * page;
617
618    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
619
620    // build extended pointer on lock protecting remote PPM
621    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
622
623        // get lock protecting free lists in remote cluster
624        remote_busylock_acquire( lock_xp );
625
626        printk("\n***** PPM in cluster %x / %d pages\n", cxy , hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );
627
628        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
629        {
630        // get number of free pages for free_list[order] in remote cluster
631        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
632                printk("- order = %d / free_pages = %d\t: ", order , n );
633
634                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
635                {
636                        page = LIST_ELEMENT( iter , page_t , list );
637                        printk("%x," , page - ppm->pages_tbl );
638                }
639
640                printk("\n");
641        }
642
643        // release lock protecting free lists in remote cluster
644        remote_busylock_release( lock_xp );
645}
646
647////////////////////////////////
648error_t ppm_assert_order( void )
649{
650        uint32_t       order;
651        list_entry_t * iter;
652        page_t       * page;
653
654    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
655
656        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
657        {
658                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
659
660                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
661                {
662                        page = LIST_ELEMENT( iter , page_t , list );
663                        if( page->order != order )  return -1;
664                }
665        }
666
667        return 0;
668}
669
670
671//////////////////////////////////////////////////////////////////////////////////////
672//     functions to handle  dirty physical pages
673//////////////////////////////////////////////////////////////////////////////////////
674
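// Dirty pages are registered in the per-cluster dirty list, so that
// ppm_sync_dirty_pages() can later synchronize them with the backing device,
// using vfs_fs_move_page(). Both ppm_page_do_dirty() and ppm_page_undo_dirty()
// take the PPM dirty_lock first, and then the page busylock.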
675//////////////////////////////////////////
676bool_t ppm_page_do_dirty( xptr_t page_xp )
677{
678        bool_t done = false;
679
680    // get page cluster and local pointer
681    page_t * page_ptr = GET_PTR( page_xp );
682    cxy_t    page_cxy = GET_CXY( page_xp );
683
684    // get local pointer on PPM (same in all clusters)
685        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
686
687    // build extended pointers on page lock, page flags, and PPM dirty list lock
688    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
689    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
690    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
691           
692        // lock the remote PPM dirty_list
693        remote_queuelock_acquire( dirty_lock_xp );
694
695    // lock the remote page
696    remote_busylock_acquire( page_lock_xp );
697
698    // get remote page flags
699    uint32_t flags = hal_remote_l32( page_flags_xp );
700
701        if( (flags & PG_DIRTY) == 0 )
702        {
703                // set dirty flag in page descriptor
704        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
705
706                // insert the page in the remote dirty list
707        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
708
709                done = true;
710        }
711
712    // unlock the remote page
713    remote_busylock_release( page_lock_xp );
714
715        // unlock the remote PPM dirty_list
716        remote_queuelock_release( dirty_lock_xp );
717
718        return done;
719
720} // end ppm_page_do_dirty()
721
722////////////////////////////////////////////
723bool_t ppm_page_undo_dirty( xptr_t page_xp )
724{
725        bool_t done = false;
726
727    // get page cluster and local pointer
728    page_t * page_ptr = GET_PTR( page_xp );
729    cxy_t    page_cxy = GET_CXY( page_xp );
730
731    // get local pointer on PPM (same in all clusters)
732        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
733
734    // build extended pointers on page lock, page flags, and PPM dirty list lock
735    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
736    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
737    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
738           
739        // lock the remote PPM dirty_list
740        remote_queuelock_acquire( dirty_lock_xp );
741
742    // lock the remote page
743    remote_busylock_acquire( page_lock_xp );
744
745    // get remote page flags
746    uint32_t flags = hal_remote_l32( page_flags_xp );
747
748        if( (flags & PG_DIRTY) )  // page is dirty
749        {
750                // reset dirty flag in page descriptor
751        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
752
753        // remove the page from remote dirty list
754        list_remote_unlink( page_cxy , &page_ptr->list );
755
756                done = true;
757        }
758
759    // unlock the remote page
760    remote_busylock_release( page_lock_xp );
761
762        // unlock the remote PPM dirty_list
763        remote_queuelock_release( dirty_lock_xp );
764
765        return done;
766
767}  // end ppm_page_undo_dirty()
768
769/////////////////////////////////
770void ppm_sync_dirty_pages( void )
771{
772        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
773
774    // get local pointer on PPM dirty_root
775    list_entry_t * dirty_root = &ppm->dirty_root;
776
777    // build extended pointer on PPM dirty_lock
778    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
779
780        // get the PPM dirty_list lock
781        remote_queuelock_acquire( dirty_lock_xp );
782
783        while( !list_is_empty( &ppm->dirty_root ) )
784        {
785                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
786        xptr_t   page_xp = XPTR( local_cxy , page );
787
788        // build extended pointer on page lock
789        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
790
791                // get the page lock
792                remote_busylock_acquire( page_lock_xp );
793
794                // sync the page
795                vfs_fs_move_page( page_xp , false );  // from mapper to device
796
797                // release the page lock
798                remote_busylock_release( page_lock_xp );
799        }
800
801        // release the PPM dirty_list lock
802        remote_queuelock_release( dirty_lock_xp );
803
804}  // end ppm_sync_dirty_pages()
805