source: trunk/kernel/mm/ppm.c @ 635

Last change on this file since 635 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This cures a deadlock that could occur with concurrent page faults.

This version 2.2 has been tested on a TSAR architecture with 4 clusters
and 2 cores per cluster, running both the "sort" and "fft" applications.
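
The sketch below is a minimal, hypothetical caller-side example of the new
scheme: it calls ppm_remote_alloc_pages() directly where an RPC to the remote
cluster was previously required (the helper name fault_get_remote_page() and
its use by a page-fault handler are invented for this illustration):

    // allocate one small page (order 0) in cluster <cxy> and return an
    // extended pointer on its base, without issuing any RPC
    static xptr_t fault_get_remote_page( cxy_t cxy )
    {
        page_t * page = ppm_remote_alloc_pages( cxy , 0 );

        // allocation fails when the remote cluster is out of memory
        if( page == NULL ) return XPTR_NULL;

        // the returned pointer is only valid in cluster <cxy>
        return ppm_page2base( XPTR( cxy , page ) );
    }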

File size: 25.0 KB
1/*
2 * ppm.c - Per-cluster Physical Pages Manager implementation
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Alain Greiner    (2016,2017,2018,2019)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH.is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH.is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
32#include <dqdt.h>
33#include <busylock.h>
34#include <queuelock.h>
35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
39#include <mapper.h>
40#include <ppm.h>
41#include <vfs.h>
42
43////////////////////////////////////////////////////////////////////////////////////////
44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
53/////////////////////////////////////////////
54inline xptr_t ppm_page2base( xptr_t page_xp )
55{
56        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
57
58    cxy_t    page_cxy = GET_CXY( page_xp );
59    page_t * page_ptr = GET_PTR( page_xp );
60
61   void   * base_ptr = ppm->vaddr_base + 
62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
63
64        return XPTR( page_cxy , base_ptr );
65
66} // end ppm_page2base()
67
68/////////////////////////////////////////////
69inline xptr_t ppm_base2page( xptr_t base_xp )
70{
71        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
72
73    cxy_t    base_cxy = GET_CXY( base_xp );
74    void   * base_ptr = GET_PTR( base_xp );
75
76        page_t * page_ptr = ppm->pages_tbl + 
77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
78
79        return XPTR( base_cxy , page_ptr );
80
81}  // end ppm_base2page()
82
83
84
85///////////////////////////////////////////
86inline ppn_t ppm_page2ppn( xptr_t page_xp )
87{
88        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
89
90    cxy_t    page_cxy = GET_CXY( page_xp );
91    page_t * page_ptr = GET_PTR( page_xp );
92
93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
94
95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
96
97}  // end ppm_page2ppn()
98
99///////////////////////////////////////
100inline xptr_t ppm_ppn2page( ppn_t ppn )
101{
102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
103
104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
105
106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
108
109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
110
111}  // end ppm_ppn2page()
112
113
114
115///////////////////////////////////////
116inline xptr_t ppm_ppn2base( ppn_t ppn )
117{
118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
119   
120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
121
122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
123    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
124
125        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
126
127}  // end ppm_ppn2base()
128
129///////////////////////////////////////////
130inline ppn_t ppm_base2ppn( xptr_t base_xp )
131{
132        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
133
134    cxy_t    base_cxy = GET_CXY( base_xp );
135    void   * base_ptr = GET_PTR( base_xp );
136
137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
138
139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
140
141}  // end ppm_base2ppn()
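
// The hypothetical helper below is a minimal sketch showing how the
// translation functions above compose; the name ppm_translate_demo() is
// invented for this illustration and is not used elsewhere in the kernel.
static inline ppn_t ppm_translate_demo( xptr_t page_xp )
{
    // page descriptor -> physical page number
    ppn_t  ppn     = ppm_page2ppn( page_xp );

    // physical page number -> extended pointer on the page base
    xptr_t base_xp = ppm_ppn2base( ppn );

    // page base -> back to the same physical page number
    return ppm_base2ppn( base_xp );
}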
142
143
144////////////////////////////////////////////////////////////////////////////////////////
145//     functions to  allocate / release  physical pages
146////////////////////////////////////////////////////////////////////////////////////////
147
148///////////////////////////////////////////
149void ppm_free_pages_nolock( page_t * page )
150{
151        page_t   * buddy;            // searched buddy block page descriptor
152        uint32_t   buddy_index;      // buddy block index in page_tbl[]
153        page_t   * current;          // current (merged) block page descriptor
154        uint32_t   current_index;    // current (merged) block index in page_tbl[]
155        uint32_t   current_order;    // current (merged) block order
156
157        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
158        page_t   * pages_tbl   = ppm->pages_tbl;
159
160assert( !page_is_flag( page , PG_FREE ) ,
161"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
162
163assert( !page_is_flag( page , PG_RESERVED ) ,
164"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
165
166        // update released page descriptor flags
167        page_set_flag( page , PG_FREE );
168
169        // search the buddy page descriptor
170        // - merge with current page descriptor if found
171        // - exit to release the current page descriptor if not found
172        current       = page;
173        current_index = (uint32_t)(page - ppm->pages_tbl);
174        for( current_order = page->order ;
175             current_order < CONFIG_PPM_MAX_ORDER ;
176             current_order++ )
177        {
178                buddy_index = current_index ^ (1 << current_order);
179                buddy       = pages_tbl + buddy_index;
180
181        // exit this loop if buddy block not found
182                if( !page_is_flag( buddy , PG_FREE ) || 
183            (buddy->order != current_order) ) break;
184
185                // remove buddy block from free_list
186                list_unlink( &buddy->list );
187                ppm->free_pages_nr[current_order] --;
188
189        // reset order field in buddy block page descriptor
190                buddy->order = 0;
191
192                // compute merged block index in page_tbl[]
193                current_index &= buddy_index;
194        }
195
196        // update pointer and order field for merged block page descriptor
197        current        = pages_tbl + current_index;
198        current->order = current_order;
199
200        // insert merged block in free list
201        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
202        ppm->free_pages_nr[current_order] ++;
203
204}  // end ppm_free_pages_nolock()
205
206////////////////////////////////////////////
207page_t * ppm_alloc_pages( uint32_t   order )
208{
209        page_t   * current_block;
210        uint32_t   current_order;
211        uint32_t   current_size;
212        page_t   * found_block; 
213
214    thread_t * this = CURRENT_THREAD;
215
216#if DEBUG_PPM_ALLOC_PAGES
217uint32_t cycle = (uint32_t)hal_get_cycles();
218if( DEBUG_PPM_ALLOC_PAGES < cycle )
219printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
220__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
221#endif
222
223#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
224if( DEBUG_PPM_ALLOC_PAGES < cycle )
225ppm_remote_display( local_cxy );
226#endif
227
228        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
229
230// check order
231assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
232
233    // build extended pointer on lock protecting the local PPM free lists
234    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
235
236        // take lock protecting free lists
237        remote_busylock_acquire( lock_xp );
238
239        current_block = NULL;
240    current_order = order;
241
242        // search a free block equal or larger than requested size
243        while( current_order < CONFIG_PPM_MAX_ORDER )
244        {
245        // get local pointer on the root of relevant free_list (same in all clusters)
246        list_entry_t * root = &ppm->free_pages_root[current_order];
247
248                if( !list_is_empty( root ) )
249                {
250            // get first free block in this free_list
251                        current_block = LIST_FIRST( root , page_t , list );
252
253            // remove this block from this free_list
254                        list_unlink( &current_block->list );
255                ppm->free_pages_nr[current_order] --;
256
257            // register pointer on found block
258            found_block = current_block;
259
260            // compute found block size
261                current_size = (1 << current_order);
262
263                        break; 
264                }
265
266        // increment loop index
267        current_order++;
268        }
269
270        if( current_block == NULL ) // return failure if no free block found
271        {
272                // release lock protecting free lists
273                remote_busylock_release( lock_xp );
274
275        printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
276        __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
277
278                return NULL;
279        }
280
281
282        // split the found block in smaller sub-blocks if required
283        // and update the free-lists accordingly
284        while( current_order > order )
285        {
286        // update size and order
287                current_order --;
288                current_size >>= 1;
289
290        // update order field in new free block
291                current_block = found_block + current_size;
292                current_block->order = current_order;
293
294        // insert new free block in relevant free_list
295                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
296                ppm->free_pages_nr[current_order] ++;
297        }
298
299        // update found block page descriptor
300        page_clear_flag( found_block , PG_FREE );
301        page_refcount_up( found_block );
302        found_block->order = order;
303
304        // release lock protecting free lists
305        remote_busylock_release( lock_xp );
306
307    // update DQDT
308    dqdt_increment_pages( local_cxy , order );
309
310#if DEBUG_PPM_ALLOC_PAGES
311cycle = (uint32_t)hal_get_cycles();
312if( DEBUG_PPM_ALLOC_PAGES < cycle )
313printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
314__FUNCTION__, this->process->pid, this->trdid, 
3151<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
316#endif
317
318#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
319if( DEBUG_PPM_ALLOC_PAGES < cycle )
320ppm_remote_display( local_cxy );
321#endif
322
323        return found_block;
324
325}  // end ppm_alloc_pages()
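
// The hypothetical function below is a minimal usage sketch for the local
// allocator defined above; the name ppm_local_alloc_demo() is invented for
// this illustration.
static inline ppn_t ppm_local_alloc_demo( void )
{
    // allocate a block of 4 contiguous small pages (order 2) in local cluster
    page_t * page = ppm_alloc_pages( 2 );

    // the allocator returns NULL when no free block is large enough
    if( page == NULL ) return 0;

    // get the PPN of the first page of the block
    ppn_t ppn = ppm_page2ppn( XPTR( local_cxy , page ) );

    // release the whole block (the order is stored in the page descriptor)
    ppm_free_pages( page );

    return ppn;
}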
326
327
328////////////////////////////////////
329void ppm_free_pages( page_t * page )
330{
331        ppm_t    * ppm   = &LOCAL_CLUSTER->ppm;
332        uint32_t   order = page->order;   // get released order before a possible buddy merge
333#if DEBUG_PPM_FREE_PAGES
334thread_t * this  = CURRENT_THREAD;
335uint32_t   cycle = (uint32_t)hal_get_cycles();
336if( DEBUG_PPM_FREE_PAGES < cycle )
337printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
338__FUNCTION__, this->process->pid, this->trdid, 
3391<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
340#endif
341
342#if(DEBUG_PPM_FREE_PAGES & 0x1)
343if( DEBUG_PPM_FREE_PAGES < cycle )
344ppm_remote_display( local_cxy );
345#endif
346
347    // build extended pointer on lock protecting free_lists
348    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
349
350        // get lock protecting free_pages[] array
351        remote_busylock_acquire( lock_xp );
352
353        ppm_free_pages_nolock( page );
354
355        // release lock protecting free_lists
356        remote_busylock_release( lock_xp );
357
358    // update DQDT
359    dqdt_decrement_pages( local_cxy , order );
360
361#if DEBUG_PPM_FREE_PAGES
362cycle = (uint32_t)hal_get_cycles();
363if( DEBUG_PPM_FREE_PAGES < cycle )
364printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
365__FUNCTION__, this->process->pid, this->trdid, 
3661<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
367#endif
368
369#if(DEBUG_PPM_FREE_PAGES & 0x1)
370if( DEBUG_PPM_FREE_PAGES < cycle )
371ppm_remote_display( local_cxy );
372#endif
373
374}  // end ppm_free_pages()
375
376/////////////////////////////////////////////
377void * ppm_remote_alloc_pages( cxy_t     cxy,
378                               uint32_t  order )
379{
380        uint32_t   current_order;
381        uint32_t   current_size;
382    page_t   * current_block;   
383    page_t   * found_block;
384
385    thread_t * this  = CURRENT_THREAD;
386
387#if DEBUG_PPM_REMOTE_ALLOC_PAGES
388uint32_t   cycle = (uint32_t)hal_get_cycles();
389if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
390printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
391__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
392#endif
393
394#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
395if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
396ppm_remote_display( cxy );
397#endif
398
399// check order
400assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
401
402    // get local pointer on PPM (same in all clusters)
403        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
404
405    // build extended pointer on lock protecting remote PPM
406    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
407
408        // take lock protecting free lists in remote cluster
409        remote_busylock_acquire( lock_xp );
410
411    current_block = NULL;   
412    current_order = order;
413
414    // search a free block equal or larger than requested size
415    while( current_order < CONFIG_PPM_MAX_ORDER )
416    {
417        // get local pointer on the root of relevant free_list (same in all clusters)
418        list_entry_t * root = &ppm->free_pages_root[current_order];
419
420                if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
421                {
422            // get local pointer on first free page descriptor in remote cluster
423                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
424
425            // remove first free page from the free-list in remote cluster
426                        list_remote_unlink( cxy , &current_block->list );
427                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
428
429            // register found block
430            found_block = current_block;
431
432            // compute found block size
433                current_size = (1 << current_order);
434
435                        break;
436                }
437
438        // increment loop index
439        current_order++;
440        }
441
442        if( current_block == NULL ) // return failure
443        {
444                // release lock protecting free lists
445                remote_busylock_release( lock_xp );
446
447        printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
448        __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
449
450                return NULL;
451        }
452
453        // split the found block in smaller sub-blocks if required
454        // and update the free-lists accordingly in remote cluster
455        while( current_order > order )
456        {
457        // update order and size
458                current_order --;
459                current_size >>= 1;
460
461        // update new free block order field in remote cluster
462                current_block = found_block + current_size;
463                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
464
465        // get local pointer on the root of the relevant free_list in remote cluster 
466        list_entry_t * root = &ppm->free_pages_root[current_order];
467
468        // insert new free block in this free_list
469                list_remote_add_first( cxy , root, &current_block->list );
470
471        // update free-list number of items in remote cluster
472        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
473        }
474
475        // update refcount, flags and order fields in found block
476        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
477        page_remote_refcount_up( XPTR( cxy , found_block ) );
478        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
479   
480        // release lock protecting free lists in remote cluster
481        remote_busylock_release( lock_xp );
482
483    // update DQDT page counter in remote cluster
484    dqdt_increment_pages( cxy , order );
485
486#if DEBUG_PPM_REMOTE_ALLOC_PAGES
487cycle = (uint32_t)hal_get_cycles();
488if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
489printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
490__FUNCTION__, this->process->pid, this->trdid, 
4911<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
492#endif
493
494#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
495if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
496ppm_remote_display( cxy );
497#endif
498
499        return found_block;
500
501}  // end ppm_remote_alloc_pages()
502
503//////////////////////////////////////////
504void ppm_remote_free_pages( cxy_t     cxy,
505                            page_t  * page )
506{
507    xptr_t     page_xp;          // extended pointer on released page descriptor
508    uint32_t   order;            // released block order
509        page_t   * buddy_ptr;        // searched buddy block page descriptor
510    uint32_t   buddy_order;      // searched buddy block order
511        uint32_t   buddy_index;      // buddy block index in page_tbl[]
512        page_t   * current_ptr;      // current (merged) block page descriptor
513        uint32_t   current_index;    // current (merged) block index in page_tbl[]
514        uint32_t   current_order;    // current (merged) block order
515
516#if DEBUG_PPM_REMOTE_FREE_PAGES
517thread_t * this  = CURRENT_THREAD;
518uint32_t   cycle = (uint32_t)hal_get_cycles();
519if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
520printk("\n[%s] thread[%x,%x] enter for %d page(s) / cxy %x / ppn %x / cycle %d\n",
521__FUNCTION__, this->process->pid, this->trdid, 
5221<<hal_remote_l32( XPTR( cxy , &page->order ) ), cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
523#endif
524
525#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
526if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
527ppm_remote_display( cxy );
528#endif
529
530    // build extended pointer on released page descriptor
531    page_xp = XPTR( cxy , page );
532   
533    // get released page order
534    order = hal_remote_l32( XPTR( cxy , &page->order ) );
535
536    // get local pointer on PPM (same in all clusters)
537        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
538
539    // build extended pointer on lock protecting remote PPM
540    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
541
542    // get local pointer on remote PPM page_tbl[] array
543        page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );
544
545        // get lock protecting free_pages in remote cluster
546        remote_busylock_acquire( lock_xp );
547
548assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
549"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
550
551assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
552"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
553
554        // update released page descriptor flags
555        page_remote_set_flag( page_xp , PG_FREE );
556
557        // search the buddy page descriptor
558        // - merge with current page descriptor if found
559        // - exit to release the current page descriptor if not found
560        current_ptr   = page;
561        current_index = (uint32_t)(page - ppm->pages_tbl);
562        for( current_order = order ;
563             current_order < CONFIG_PPM_MAX_ORDER ;
564             current_order++ )
565        {
566                buddy_index = current_index ^ (1 << current_order);
567                buddy_ptr   = pages_tbl + buddy_index;
568
569        // get buddy block order
570        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );
571
572        // exit loop if buddy block not found
573                if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) || 
574            (buddy_order != current_order) ) break;
575
576                // remove buddy from free list in remote cluster
577                list_remote_unlink( cxy , &buddy_ptr->list );
578        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );
579
580        // reset order field in buddy block page descriptor
581        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );
582
583                // compute merged block index in page_tbl[] array
584                current_index &= buddy_index;
585        }
586
587        // update merged page descriptor order field
588        current_ptr = pages_tbl + current_index;
589    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );
590
591        // insert merged block into relevant free list in remote cluster
592        list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
593    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );
594
595        // release lock protecting free_pages[] array
596        remote_busylock_release( lock_xp );
597
598    // update DQDT
599    dqdt_decrement_pages( cxy , order );
600
601#if DEBUG_PPM_REMOTE_FREE_PAGES
602cycle = (uint32_t)hal_get_cycles();
603if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
604printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
605__FUNCTION__, this->process->pid, this->trdid, 
6061<<order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
607#endif
608
609#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
611ppm_remote_display( cxy );
612#endif
613
614}  // end ppm_remote_free_pages()
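
// The hypothetical function below is a minimal round-trip sketch on the
// remote allocator / releaser defined above; the name
// ppm_remote_roundtrip_demo() is invented for this illustration.
static inline void ppm_remote_roundtrip_demo( cxy_t cxy )
{
    // allocate a block of 2 contiguous small pages (order 1) in cluster <cxy>
    page_t * page = ppm_remote_alloc_pages( cxy , 1 );

    if( page != NULL )
    {
        // the page descriptor pointer is local to cluster <cxy>:
        // it must be given back to the same cluster when released
        ppm_remote_free_pages( cxy , page );
    }
}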
615
616////////////////////////////////////
617void ppm_remote_display( cxy_t cxy )
618{
619        uint32_t       order;
620        list_entry_t * iter;
621    xptr_t         page_xp;
622
623    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
624
625    // build extended pointer on lock protecting remote PPM
626    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
627
628    // get pointers on TXT0 chdev
629    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
630    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
631    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
632
633    // build extended pointer on remote TXT0 lock
634    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
635
636        // get PPM lock
637        remote_busylock_acquire( ppm_lock_xp );
638
639    // get TXT0 lock
640    remote_busylock_acquire( txt_lock_xp );
641
642        nolock_printk("\n***** PPM in cluster %x / %d pages\n", cxy , hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );
643
644        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
645        {
646        // get number of free pages for free_list[order] in remote cluster
647        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
648
649                nolock_printk("- order = %d / n = %d\t: ", order , n );
650
651                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
652                {
653            // build extended pointer on page descriptor
654            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
655
656            // display PPN
657                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
658                }
659
660                nolock_printk("\n");
661        }
662
663        // release TXT0 lock
664        remote_busylock_release( txt_lock_xp );
665
666        // release PPM lock
667        remote_busylock_release( ppm_lock_xp );
668}
669
670////////////////////////////////
671error_t ppm_assert_order( void )
672{
673        uint32_t       order;
674        list_entry_t * iter;
675        page_t       * page;
676
677    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
678
679        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
680        {
681                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
682
683                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
684                {
685                        page = LIST_ELEMENT( iter , page_t , list );
686                        if( page->order != order )  return -1;
687                }
688        }
689
690        return 0;
691}
692
693
694//////////////////////////////////////////////////////////////////////////////////////
695//     functions to handle  dirty physical pages
696//////////////////////////////////////////////////////////////////////////////////////
697
698//////////////////////////////////////////
699bool_t ppm_page_do_dirty( xptr_t page_xp )
700{
701        bool_t done = false;
702
703    // get page cluster and local pointer
704    page_t * page_ptr = GET_PTR( page_xp );
705    cxy_t    page_cxy = GET_CXY( page_xp );
706
707    // get local pointer on PPM (same in all clusters)
708        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
709
710    // build extended pointers on page lock, page flags, and PPM dirty list lock
711    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
712    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
713    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
714           
715        // lock the remote PPM dirty_list
716        remote_queuelock_acquire( dirty_lock_xp );
717
718    // lock the remote page
719    remote_busylock_acquire( page_lock_xp );
720
721    // get remote page flags
722    uint32_t flags = hal_remote_l32( page_flags_xp );
723
724        if( (flags & PG_DIRTY) == 0 )
725        {
726                // set dirty flag in page descriptor
727        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
728
729                // insert the page in the remote dirty list
730        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
731
732                done = true;
733        }
734
735    // unlock the remote page
736    remote_busylock_release( page_lock_xp );
737
738        // unlock the remote PPM dirty_list
739        remote_queuelock_release( dirty_lock_xp );
740
741        return done;
742
743} // end ppm_page_do_dirty()
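
// The hypothetical function below is a minimal sketch showing how a writer
// can tag a mapper page as dirty and how the tag can later be removed; the
// name ppm_dirty_demo() is invented for this illustration.
static inline void ppm_dirty_demo( xptr_t page_xp )
{
    // register the page in its cluster dirty list (no effect if already dirty)
    bool_t marked = ppm_page_do_dirty( page_xp );

    // a writer would normally leave the page dirty until ppm_sync_dirty_pages()
    // flushes it; it is removed here only to keep the sketch self-contained
    if( marked ) ppm_page_undo_dirty( page_xp );
}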
744
745////////////////////////////////////////////
746bool_t ppm_page_undo_dirty( xptr_t page_xp )
747{
748        bool_t done = false;
749
750    // get page cluster and local pointer
751    page_t * page_ptr = GET_PTR( page_xp );
752    cxy_t    page_cxy = GET_CXY( page_xp );
753
754    // get local pointer on PPM (same in all clusters)
755        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
756
757    // build extended pointers on page lock, page flags, and PPM dirty list lock
758    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
759    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
760    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
761           
762        // lock the remote PPM dirty_list
763        remote_queuelock_acquire( dirty_lock_xp );
764
765    // lock the remote page
766    remote_busylock_acquire( page_lock_xp );
767
768    // get remote page flags
769    uint32_t flags = hal_remote_l32( page_flags_xp );
770
771        if( (flags & PG_DIRTY) )  // page is dirty
772        {
773                // reset dirty flag in page descriptor
774        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
775
776        // remove the page from remote dirty list
777        list_remote_unlink( page_cxy , &page_ptr->list );
778
779                done = true;
780        }
781
782    // unlock the remote page
783    remote_busylock_release( page_lock_xp );
784
785        // unlock the remote PPM dirty_list
786        remote_queuelock_release( dirty_lock_xp );
787
788        return done;
789
790}  // end ppm_page_undo_dirty()
791
792/////////////////////////////////
793void ppm_sync_dirty_pages( void )
794{
795        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
796
797    // get local pointer on PPM dirty_root
798    list_entry_t * dirty_root = &ppm->dirty_root;
799
800    // build extended pointer on PPM dirty_lock
801    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
802
803        // get the PPM dirty_list lock
804        remote_queuelock_acquire( dirty_lock_xp );
805
806        while( !list_is_empty( &ppm->dirty_root ) )
807        {
808                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
809        xptr_t   page_xp = XPTR( local_cxy , page );
810
811        // build extended pointer on page lock
812        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
813
814                // get the page lock
815                remote_busylock_acquire( page_lock_xp );
816
817                // sync the page
818                vfs_fs_move_page( page_xp , false );  // from mapper to device
819
820                // release the page lock
821                remote_busylock_release( page_lock_xp );
822        }
823
824        // release the PPM dirty_list lock
825        remote_queuelock_release( dirty_lock_xp );
826
827}  // end ppm_sync_dirty_pages()
828