source: trunk/kernel/mm/ppm.c @ 634

Last change on this file since 634 was 634, checked in by alain, 5 years ago

Fix a bug in hal_irqmask.c

File size: 25.2 KB
/*
 * ppm.c - Per-cluster Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <printk.h>
#include <list.h>
#include <bits.h>
#include <page.h>
#include <dqdt.h>
#include <busylock.h>
#include <queuelock.h>
#include <thread.h>
#include <cluster.h>
#include <kmem.h>
#include <process.h>
#include <mapper.h>
#include <ppm.h>
#include <vfs.h>

////////////////////////////////////////////////////////////////////////////////////////
//         global variables
////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c

////////////////////////////////////////////////////////////////////////////////////////
//     functions to translate [ page <-> base <-> ppn ]
////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base +
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

        return XPTR( page_cxy , base_ptr );

} // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

        page_t * page_ptr = ppm->pages_tbl +
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);

        return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()



///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );

}  // end ppm_ppn2page()



///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_base2ppn()


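////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file / not compiled): the helpers
// above give three equivalent views of a physical page (page descriptor, kernel
// virtual base address, PPN), and the translations are mutually inverse.
////////////////////////////////////////////////////////////////////////////////////////

#if 0
static void ppm_translate_example( xptr_t page_xp )
{
    xptr_t base_xp = ppm_page2base( page_xp );   // page descriptor -> base address
    ppn_t  ppn     = ppm_base2ppn ( base_xp );   // base address    -> PPN
    xptr_t back_xp = ppm_ppn2page ( ppn );       // PPN             -> page descriptor

    // the round-trip must return the original page descriptor
    assert( (back_xp == page_xp) , "page <-> ppn translation mismatch for ppn %x\n" , ppn );
}
#endif
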
////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
        page_t   * buddy;            // searched buddy block page descriptor
        uint32_t   buddy_index;      // buddy block index in page_tbl[]
        page_t   * current;          // current (merged) block page descriptor
        uint32_t   current_index;    // current (merged) block index in page_tbl[]
        uint32_t   current_order;    // current (merged) block order

        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
        page_t   * pages_tbl   = ppm->pages_tbl;

assert( !page_is_flag( page , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

assert( !page_is_flag( page , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

        // update released page descriptor flags
        page_set_flag( page , PG_FREE );

        // search the buddy page descriptor
        // - merge with current page descriptor if found
        // - exit to release the current page descriptor if not found
        current       = page;
        current_index = (uint32_t)(page - ppm->pages_tbl);
        for( current_order = page->order ;
             current_order < CONFIG_PPM_MAX_ORDER ;
             current_order++ )
        {
                buddy_index = current_index ^ (1 << current_order);
                buddy       = pages_tbl + buddy_index;

        // exit this loop if buddy block not found
                if( !page_is_flag( buddy , PG_FREE ) ||
            (buddy->order != current_order) ) break;

                // remove buddy block from free_list
                list_unlink( &buddy->list );
                ppm->free_pages_nr[current_order] --;

        // reset order field in buddy block page descriptor
                buddy->order = 0;

                // compute merged block index in page_tbl[]
                current_index &= buddy_index;
        }

        // update pointer and order field for merged block page descriptor
        current        = pages_tbl + current_index;
        current->order = current_order;

        // insert merged block in free list
        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
        ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()

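// Worked example (added for illustration, not in the original file): releasing an
// order 2 block starting at page_tbl[] index 8 gives buddy_index = 8 ^ (1<<2) = 12.
// If pages_tbl[12] is free with the same order 2, the two blocks merge into an
// order 3 block whose index is 8 & 12 = 8; the loop then looks for the order 3
// buddy at index 8 ^ (1<<3) = 0, and stops as soon as no free buddy of the current
// order exists, inserting the merged block in the corresponding free list.
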
////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
        page_t   * current_block;
        uint32_t   current_order;
        uint32_t   current_size;
        page_t   * found_block;

#if DEBUG_PPM_ALLOC_PAGES
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // build extended pointer on lock protecting the local PPM free lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

        // take lock protecting free lists
        remote_busylock_acquire( lock_xp );

        current_block = NULL;

        // find a free block equal or larger to requested size
        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
        {
                if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
                {
            // get first free block in this free_list
                        current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );

            // remove this block from this free_list
                        list_unlink( &current_block->list );

            // register pointer on found block
            found_block = current_block;

            // update this free-list number of blocks
                ppm->free_pages_nr[current_order] --;

            // compute found block size
                current_size = (1 << current_order);

                        break;
                }
        }

        if( current_block == NULL ) // return failure if no free block found
        {
                // release lock protecting free lists
                remote_busylock_release( lock_xp );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif

                return NULL;
        }


        // split the found block in smaller sub-blocks if required
        // and update the free-lists accordingly
        while( current_order > order )
        {
                current_order --;

        // update pointer, size, and order fields for new free block
                current_size >>= 1;
                current_block = found_block + current_size;
                current_block->order = current_order;

        // insert new free block in relevant free_list
                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );

        // update number of blocks in free list
                ppm->free_pages_nr[current_order] ++;
        }

        // update found block page descriptor
        page_clear_flag( found_block , PG_FREE );
        page_refcount_up( found_block );
        found_block->order = order;

        // release lock protecting free lists
        remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_increment_pages( local_cxy , order );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

        return found_block;

}  // end ppm_alloc_pages()


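////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original file / not compiled): allocating
// and releasing a physically contiguous block of 4 small pages in the local cluster.
////////////////////////////////////////////////////////////////////////////////////////

#if 0
static void ppm_local_alloc_example( void )
{
    // order 2 => (1 << 2) = 4 contiguous small pages
    page_t * page = ppm_alloc_pages( 2 );

    if( page != NULL )
    {
        // get base address and PPN of the allocated block
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
        ppn_t  ppn     = ppm_page2ppn ( XPTR( local_cxy , page ) );

        // ... use the block identified by <base_xp> / <ppn> ...

        // return the block to the buddy allocator
        ppm_free_pages( page );
    }
}
#endif
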
////////////////////////////////////
void ppm_free_pages( page_t * page )
{
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get released block order now, because the buddy merge
    // in ppm_free_pages_nolock() can modify the page descriptor
    uint32_t order = page->order;

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

    // build extended pointer on lock protecting free_lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

        // get lock protecting free_pages[] array
        remote_busylock_acquire( lock_xp );

        ppm_free_pages_nolock( page );

        // release lock protecting free_lists
        remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( local_cxy , order );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_remote_display( local_cxy );
#endif

}  // end ppm_free_pages()

/////////////////////////////////////////////
xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
                               uint32_t  order )
{
        uint32_t   current_order;
        uint32_t   current_size;
    page_t   * current_block;
    page_t   * found_block;

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

// check order
assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

    // get local pointer on PPM (same in all clusters)
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

        // take lock protecting free lists in remote cluster
        remote_busylock_acquire( lock_xp );

    current_block = NULL;

        // find in remote cluster a free block equal or larger to requested size
        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
        {
        // get local pointer on the root of relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

                if( !list_remote_is_empty( cxy , root ) )
                {
            // get local pointer on first free page descriptor in remote cluster
                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );

            // remove first free page from the free-list in remote cluster
                        list_remote_unlink( cxy , &current_block->list );

            // register found block
            found_block = current_block;

                // decrement relevant free-list number of items in remote cluster
                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );

            // compute found block size
                current_size = (1 << current_order);

                        break;
                }
        }

        if( current_block == NULL ) // return failure
        {
                // release lock protecting free lists
                remote_busylock_release( lock_xp );

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif

                return XPTR_NULL;
        }

        // split the found block in smaller sub-blocks if required
        // and update the free-lists accordingly in remote cluster
        while( current_order > order )
        {
        // update order, size, and local pointer for new free block
                current_order --;
                current_size >>= 1;
                current_block = found_block + current_size;

        // update new free block order field in remote cluster
                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );

        // get local pointer on the root of the relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        // insert new free block in this free_list
                list_remote_add_first( cxy , root, &current_block->list );

        // update free-list number of items in remote cluster
        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
        }

        // update refcount, flags and order fields in found block
        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
        page_remote_refcount_up( XPTR( cxy , found_block ) );
        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );

        // release lock protecting free lists in remote cluster
        remote_busylock_release( lock_xp );

    // update DQDT page counter in remote cluster
    dqdt_increment_pages( cxy , order );

#if DEBUG_PPM_REMOTE_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
#endif

#if(DEBUG_PPM_REMOTE_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
ppm_remote_display( cxy );
#endif

        return XPTR( cxy , found_block );

}  // end ppm_remote_alloc_pages()

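////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original file / not compiled): allocating
// one small page in a possibly remote cluster, then releasing it, using the extended
// pointer returned by ppm_remote_alloc_pages().
////////////////////////////////////////////////////////////////////////////////////////

#if 0
static void ppm_remote_alloc_example( cxy_t target_cxy )
{
    // order 0 => one single small page in cluster <target_cxy>
    xptr_t page_xp = ppm_remote_alloc_pages( target_cxy , 0 );

    if( page_xp != XPTR_NULL )
    {
        // PPN of the allocated page (usable for mapping)
        ppn_t ppn = ppm_page2ppn( page_xp );

        // ... initialise or map the page identified by <ppn> ...

        // release the page in its owner cluster
        ppm_remote_free_pages( GET_CXY( page_xp ) , GET_PTR( page_xp ) );
    }
}
#endif
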
//////////////////////////////////////////
void ppm_remote_free_pages( cxy_t     cxy,
                            page_t  * page )
{
    xptr_t     page_xp;          // extended pointer on released page descriptor
    uint32_t   order;            // released block order
        page_t   * buddy_ptr;        // searched buddy block page descriptor
    uint32_t   buddy_order;      // searched buddy block order
        uint32_t   buddy_index;      // buddy block index in page_tbl[]
        page_t   * current_ptr;      // current (merged) block page descriptor
        uint32_t   current_index;    // current (merged) block index in page_tbl[]
        uint32_t   current_order;    // current (merged) block order

#if DEBUG_PPM_REMOTE_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] enter for %d page(s) / cxy %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<hal_remote_l32( XPTR( cxy , &page->order ) ), cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
#endif

#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

    // build extended pointer on released page descriptor
    page_xp = XPTR( cxy , page );

    // get released page order
    order = hal_remote_l32( XPTR( cxy , &page->order ) );

    // get local pointer on PPM (same in all clusters)
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // get local pointer on remote PPM page_tbl[] array
        page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );

        // get lock protecting free_pages in remote cluster
        remote_busylock_acquire( lock_xp );

assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
"page already released : ppn = %x\n" , ppm_page2ppn(XPTR(cxy,page)) );

assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(cxy,page)) );

        // update released page descriptor flags
        page_remote_set_flag( page_xp , PG_FREE );

        // search the buddy page descriptor
        // - merge with current page descriptor if found
        // - exit to release the current page descriptor if not found
        current_ptr   = page;
        current_index = (uint32_t)(page - ppm->pages_tbl);
        for( current_order = order ;
             current_order < CONFIG_PPM_MAX_ORDER ;
             current_order++ )
        {
                buddy_index = current_index ^ (1 << current_order);
                buddy_ptr   = pages_tbl + buddy_index;

        // get buddy block order
        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );

        // exit loop if buddy block not found
                if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) ||
            (buddy_order != current_order) ) break;

                // remove buddy from free list in remote cluster
                list_remote_unlink( cxy , &buddy_ptr->list );
        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );

        // reset order field in buddy block page descriptor
        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );

                // compute merged block index in page_tbl[] array
                current_index &= buddy_index;
        }

        // update merged page descriptor order field
        current_ptr = pages_tbl + current_index;
    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );

        // insert merged block into relevant free list in remote cluster
        list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );

        // release lock protecting free_pages[] array
        remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_decrement_pages( cxy , order );

#if DEBUG_PPM_REMOTE_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
1<<order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
#endif

#if(DEBUG_PPM_REMOTE_FREE_PAGES & 0x1)
if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
ppm_remote_display( cxy );
#endif

}  // end ppm_remote_free_pages()

////////////////////////////////////
void ppm_remote_display( cxy_t cxy )
{
        uint32_t       order;
        list_entry_t * iter;
    xptr_t         page_xp;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointer on lock protecting remote PPM
    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // build extended pointer on remote TXT0 lock
    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        // get PPM lock
        remote_busylock_acquire( ppm_lock_xp );

    // get TXT0 lock
    remote_busylock_acquire( txt_lock_xp );

        nolock_printk("\n***** PPM in cluster %x / %d pages\n",
    cxy , hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );

        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
        {
        // get number of free pages for free_list[order] in remote cluster
        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );

                nolock_printk("- order = %d / n = %d\t: ", order , n );

                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
                {
            // build extended pointer on page descriptor
            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );

            // display PPN
                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
                }

                nolock_printk("\n");
        }

        // release TXT0 lock
        remote_busylock_release( txt_lock_xp );

        // release PPM lock
        remote_busylock_release( ppm_lock_xp );
}

////////////////////////////////
error_t ppm_assert_order( void )
{
        uint32_t       order;
        list_entry_t * iter;
        page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
        {
                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
                {
                        page = LIST_ELEMENT( iter , page_t , list );
                        if( page->order != order )  return -1;
                }
        }

        return 0;
}

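////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original file / not compiled): the
// consistency check above can be called from a debug path to verify that every block
// registered in a free list has the order of that list.
////////////////////////////////////////////////////////////////////////////////////////

#if 0
static void ppm_check_example( void )
{
    assert( (ppm_assert_order() == 0) ,
    "corrupted PPM free lists in cluster %x\n" , local_cxy );
}
#endif
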

////////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
        bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

        // lock the remote PPM dirty_list
        remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

        if( (flags & PG_DIRTY) == 0 )
        {
                // set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

                // insert the page in the remote dirty list
        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );

                done = true;
        }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

        // unlock the remote PPM dirty_list
        remote_queuelock_release( dirty_lock_xp );

        return done;

} // end ppm_page_do_dirty()

////////////////////////////////////////////
bool_t ppm_page_undo_dirty( xptr_t page_xp )
{
        bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

        // lock the remote PPM dirty_list
        remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

        if( (flags & PG_DIRTY) )  // page is dirty
        {
                // reset dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

        // remove the page from remote dirty list
        list_remote_unlink( page_cxy , &page_ptr->list );

                done = true;
        }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

        // unlock the remote PPM dirty_list
        remote_queuelock_release( dirty_lock_xp );

        return done;

}  // end ppm_page_undo_dirty()

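////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original file / not compiled): a writer
// registers a mapper page in its cluster's dirty list after modifying it, and the page
// is removed from that list once it has been copied back to the backing device.
////////////////////////////////////////////////////////////////////////////////////////

#if 0
static void ppm_dirty_example( xptr_t page_xp )
{
    // set PG_DIRTY and link the page in the dirty list (no effect if already dirty)
    ppm_page_do_dirty( page_xp );

    // ... modify the page content through the mapper ...

    // after the page has been written back to the device,
    // clear PG_DIRTY and unlink the page from the dirty list
    ppm_page_undo_dirty( page_xp );
}
#endif
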
/////////////////////////////////
void ppm_sync_dirty_pages( void )
{
        ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

        // get the PPM dirty_list lock
        remote_queuelock_acquire( dirty_lock_xp );

        while( !list_is_empty( &ppm->dirty_root ) )
        {
                page_t * page = LIST_FIRST( dirty_root , page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

                // get the page lock
                remote_busylock_acquire( page_lock_xp );

                // sync the page
                vfs_fs_move_page( page_xp , false );  // from mapper to device

                // release the page lock
                remote_busylock_release( page_lock_xp );
        }

        // release the PPM dirty_list lock
        remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()
