source: trunk/kernel/mm/ppm.c @ 636

Last change on this file since 636 was 636, checked in by alain, 5 years ago

Fix a bug in list_remote_add_first() and list_remote_add_last() functions,
used by the physical memory allocator, that corrupted the PPM state.

File size: 25.3 KB
Line 
1/*
2 * ppm.c -  Physical Pages Manager implementation
3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Alain Greiner    (2016,2017,2018,2019)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
32#include <dqdt.h>
33#include <busylock.h>
34#include <queuelock.h>
35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
39#include <mapper.h>
40#include <ppm.h>
41#include <vfs.h>
42
43////////////////////////////////////////////////////////////////////////////////////////
44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
53/////////////////////////////////////////////
54inline xptr_t ppm_page2base( xptr_t page_xp )
55{
56        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
57
58    cxy_t    page_cxy = GET_CXY( page_xp );
59    page_t * page_ptr = GET_PTR( page_xp );
60
61   void   * base_ptr = ppm->vaddr_base + 
62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
63
64        return XPTR( page_cxy , base_ptr );
65
66} // end ppm_page2base()
67
68/////////////////////////////////////////////
69inline xptr_t ppm_base2page( xptr_t base_xp )
70{
71        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
72
73    cxy_t    base_cxy = GET_CXY( base_xp );
74    void   * base_ptr = GET_PTR( base_xp );
75
76        page_t * page_ptr = ppm->pages_tbl + 
77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
78
79        return XPTR( base_cxy , page_ptr );
80
81}  // end ppm_base2page()
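/*
 * Illustrative sketch (not compiled, hypothetical helper): the two translation
 * functions above are inverses of each other, as shown below. The page argument
 * is assumed to be a page descriptor owned by the local cluster.
 */
#if 0
static void ppm_page_base_roundtrip( page_t * page )
{
    // build extended pointer on the page descriptor
    xptr_t page_xp = XPTR( local_cxy , page );

    // get extended pointer on the physical buffer described by this page
    xptr_t base_xp = ppm_page2base( page_xp );

    // translating the buffer back must return the original descriptor
    assert( ppm_base2page( base_xp ) == page_xp , "page <-> base mismatch\n" );
}
#endif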
82
83
84
85///////////////////////////////////////////
86inline ppn_t ppm_page2ppn( xptr_t page_xp )
87{
88        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
89
90    cxy_t    page_cxy = GET_CXY( page_xp );
91    page_t * page_ptr = GET_PTR( page_xp );
92
93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
94
95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
96
97}  // end ppm_page2ppn()
98
99///////////////////////////////////////
100inline xptr_t ppm_ppn2page( ppn_t ppn )
101{
102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
103
104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
105
106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
108
109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
110
111}  // end ppm_ppn2page()
112
113
114
115///////////////////////////////////////
116inline xptr_t ppm_ppn2base( ppn_t ppn )
117{
118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
119   
120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
121
122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
123    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
124
125        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
126
127}  // end ppm_ppn2base()
128
129///////////////////////////////////////////
130inline ppn_t ppm_base2ppn( xptr_t base_xp )
131{
132        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
133
134    cxy_t    base_cxy = GET_CXY( base_xp );
135    void   * base_ptr = GET_PTR( base_xp );
136
137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
138
139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
140
141}  // end ppm_base2ppn()
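/*
 * Illustrative sketch (not compiled, hypothetical helper) of the address arithmetic
 * used by the translations above: a PPN is the physical address shifted right by
 * CONFIG_PPM_PAGE_SHIFT, and this physical address splits into a cluster identifier
 * (CXY) and a local physical address (LPA).
 */
#if 0
static void ppm_ppn_arithmetic_example( ppn_t ppn )
{
    // rebuild the physical address from the PPN
    paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    // split it into cluster identifier and local physical address
    cxy_t cxy = CXY_FROM_PADDR( paddr );
    lpa_t lpa = LPA_FROM_PADDR( paddr );

    // the buffer returned by ppm_ppn2base() is located in cluster cxy,
    // at offset lpa from the local virtual base of physical memory
    xptr_t base_xp = ppm_ppn2base( ppn );

    assert( (GET_CXY( base_xp ) == cxy) && (ppm_base2ppn( base_xp ) == ppn) ,
            "ppn <-> base mismatch / lpa = %x\n" , lpa );
}
#endif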
142
143
144////////////////////////////////////////////////////////////////////////////////////////
145//     functions to  allocate / release  physical pages
146////////////////////////////////////////////////////////////////////////////////////////
147
148///////////////////////////////////////////
149void ppm_free_pages_nolock( page_t * page )
150{
151        page_t   * buddy;               // searched buddy page descriptor
152        uint32_t   buddy_index;         // buddy page index in page_tbl[]
153        page_t   * current;             // current (merged) page descriptor
154        uint32_t   current_index;       // current (merged) page index in page_tbl[]
155        uint32_t   current_order;       // current (merged) page order
156
157        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
158        page_t   * pages_tbl   = ppm->pages_tbl;
159
160assert( !page_is_flag( page , PG_FREE ) ,
161"page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
162
163assert( !page_is_flag( page , PG_RESERVED ) ,
164"reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
165
166        // set FREE flag in released page descriptor
167        page_set_flag( page , PG_FREE );
168
169    // initialise loop variables
170    current       = page;
171    current_order = page->order;
172        current_index = page - ppm->pages_tbl;
173
174        // search the buddy page descriptor
175        // - merge with current page if buddy found
176        // - exit to release the current page when buddy not found
177    while( current_order < CONFIG_PPM_MAX_ORDER )
178    {
179        // compute buddy page index and page descriptor
180                buddy_index = current_index ^ (1 << current_order);
181                buddy       = pages_tbl + buddy_index;
182       
183        // exit loop if buddy not found in current free list
184                if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
185
186        // remove buddy page from current free_list
187                list_unlink( &buddy->list );
188                ppm->free_pages_nr[current_order] --;
189
190        // reset order field in buddy page descriptor
191                buddy->order = 0;
192
193                // compute next (merged) page index in page_tbl[]
194                current_index &= buddy_index;
195
196        // compute next (merged) page order
197        current_order++;
198
199        // compute next (merged) page descriptor
200        current = pages_tbl + current_index; 
201    }
202
203        // update order field for merged page descriptor
204        current->order = current_order;
205
206        // insert merged page in relevant free list
207        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
208        ppm->free_pages_nr[current_order] ++;
209
210}  // end ppm_free_pages_nolock()
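/*
 * Worked example (not compiled) of the buddy arithmetic used in the merge loop
 * above: the buddy of a block is obtained by flipping the bit of weight 2^order
 * in its page index, and the merged block starts at the smaller of the two
 * indexes, computed by the bitwise AND.
 */
#if 0
static void ppm_buddy_arithmetic_example( void )
{
    // releasing the order-2 block starting at page index 12 (0b1100)
    uint32_t index       = 12;
    uint32_t order       = 2;
    uint32_t buddy_index = index ^ (1 << order);    // 12 ^ 4 = 8 (0b1000)
    uint32_t merged      = index & buddy_index;     // 12 & 8 = 8 : order-3 block covers pages 8..15

    assert( (buddy_index == 8) && (merged == 8) , "unexpected buddy arithmetic\n" );
}
#endif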
211
212
213////////////////////////////////////////////
214page_t * ppm_alloc_pages( uint32_t   order )
215{
216        page_t   * current_block;
217        uint32_t   current_order;
218        uint32_t   current_size;
219        page_t   * found_block; 
220
221    thread_t * this = CURRENT_THREAD;
222
223#if DEBUG_PPM_ALLOC_PAGES
224uint32_t cycle = (uint32_t)hal_get_cycles();
225#endif
226
227#if (DEBUG_PPM_ALLOC_PAGES & 1)
228if( DEBUG_PPM_ALLOC_PAGES < cycle )
229{
230    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
231    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
232    ppm_remote_display( local_cxy );
233}
234#endif
235
236        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
237
238// check order
239assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
240
241    // build extended pointer on lock protecting the local PPM
242    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
243
244        // take lock protecting free lists
245        remote_busylock_acquire( lock_xp );
246
247        current_block = NULL;
248    current_order = order;
249
250        // search a free block of size equal to or larger than the requested size
251        while( current_order < CONFIG_PPM_MAX_ORDER )
252        {
253        // get local pointer on the root of relevant free_list (same in all clusters)
254        list_entry_t * root = &ppm->free_pages_root[current_order];
255
256                if( !list_is_empty( root ) )
257                {
258            // get first free block in this free_list
259                        current_block = LIST_FIRST( root , page_t , list );
260
261            // remove this block from this free_list
262                        list_unlink( &current_block->list );
263                ppm->free_pages_nr[current_order] --;
264
265            // register pointer on found block
266            found_block = current_block;
267
268            // compute found block size
269                current_size = (1 << current_order);
270
271                        break; 
272                }
273
274        // increment loop index
275        current_order++;
276        }
277
278        if( current_block == NULL ) // return failure if no free block found
279        {
280                // release lock protecting free lists
281                remote_busylock_release( lock_xp );
282
283        printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
284        __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
285
286                return NULL;
287        }
288
289
290        // split the found block into smaller sub-blocks if required
291        // and update the free-lists accordingly
292        while( current_order > order )
293        {
294        // update size and order
295                current_order --;
296                current_size >>= 1;
297
298        // update order field in new free block
299                current_block = found_block + current_size;
300                current_block->order = current_order;
301
302        // insert new free block in relevant free_list
303                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
304                ppm->free_pages_nr[current_order] ++;
305        }
306
307        // update found block page descriptor
308        page_clear_flag( found_block , PG_FREE );
309        page_refcount_up( found_block );
310        found_block->order = order;
311
312        // release lock protecting free lists
313        remote_busylock_release( lock_xp );
314
315    // update DQDT
316    dqdt_increment_pages( local_cxy , order );
317
318#if DEBUG_PPM_ALLOC_PAGES
319if( DEBUG_PPM_ALLOC_PAGES < cycle )
320{
321    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
322    __FUNCTION__, this->process->pid, this->trdid, 
323    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
324    ppm_remote_display( local_cxy );
325}
326#endif
327
328        return found_block;
329
330}  // end ppm_alloc_pages()
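/*
 * Usage sketch (not compiled, hypothetical helper): allocate four contiguous
 * physical pages in the local cluster with the function above, translate the
 * descriptor to the buffer base address, then release the block with
 * ppm_free_pages() defined below.
 */
#if 0
static void ppm_local_alloc_example( void )
{
    // request a block of 2^2 = 4 contiguous small pages
    page_t * page = ppm_alloc_pages( 2 );

    if( page != NULL )
    {
        // translate the page descriptor to the buffer base address
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );

        // ... use the buffer referenced by base_xp ...

        // release the block to the free lists
        ppm_free_pages( page );
    }
}
#endif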
331
332////////////////////////////////////
333void ppm_free_pages( page_t * page )
334{
335        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
336
337#if DEBUG_PPM_FREE_PAGES
338thread_t * this  = CURRENT_THREAD;
339uint32_t   cycle = (uint32_t)hal_get_cycles();
340#endif
341
342#if ( DEBUG_PPM_FREE_PAGES & 1 )
343if( DEBUG_PPM_FREE_PAGES < cycle )
344{
345    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
346    __FUNCTION__, this->process->pid, this->trdid, 
347    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
348    ppm_remote_display( local_cxy );
}
349#endif
350
351    //build extended pointer on lock protecting free_lists
352    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
353
354        // get lock protecting free_pages[] array
355        remote_busylock_acquire( lock_xp );
356
    // save the released block order before it can be updated by buddy merging
    uint32_t order = page->order;

357        ppm_free_pages_nolock( page );
358
359        // release lock protecting free_lists
360        remote_busylock_release( lock_xp );
361
362    // update DQDT
363    dqdt_decrement_pages( local_cxy , order );
364
365#if DEBUG_PPM_FREE_PAGES
366if( DEBUG_PPM_FREE_PAGES < cycle )
367{
368    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
369    __FUNCTION__, this->process->pid, this->trdid, 
370    1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
371    ppm_remote_display( local_cxy );
372}
373#endif
374
375}  // end ppm_free_pages()
376
377
378
379
380/////////////////////////////////////////////
381void * ppm_remote_alloc_pages( cxy_t     cxy,
382                               uint32_t  order )
383{
384        uint32_t   current_order;
385        uint32_t   current_size;
386    page_t   * current_block;   
387    page_t   * found_block;
388
389    thread_t * this  = CURRENT_THREAD;
390
391#if DEBUG_PPM_REMOTE_ALLOC_PAGES
392uint32_t   cycle = (uint32_t)hal_get_cycles();
393#endif
394
395#if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )
396if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
397{
398    printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
399    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
400    ppm_remote_display( cxy );
401}
402#endif
403
404// check order
405assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
406
407    // get local pointer on PPM (same in all clusters)
408        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
409
410    //build extended pointer on lock protecting remote PPM
411    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
412
413        // take lock protecting free lists in remote cluster
414        remote_busylock_acquire( lock_xp );
415
416    current_block = NULL;   
417    current_order = order;
418
419    // search a free block of size equal to or larger than the requested size
420    while( current_order < CONFIG_PPM_MAX_ORDER )
421    {
422        // get local pointer on the root of relevant free_list (same in all clusters)
423        list_entry_t * root = &ppm->free_pages_root[current_order];
424
425                if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
426                {
427            // get local pointer on first free page descriptor in remote cluster
428                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
429
430            // remove first free page from the free-list in remote cluster
431                        list_remote_unlink( cxy , &current_block->list );
432                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
433
434            // register found block
435            found_block = current_block;
436
437            // compute found block size
438                current_size = (1 << current_order);
439
440                        break;
441                }
442
443        // increment loop index
444        current_order++;
445        }
446
447        if( current_block == NULL ) // return failure
448        {
449                // release lock protecting free lists
450                remote_busylock_release( lock_xp );
451
452        printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
453        __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
454
455                return NULL;
456        }
457
458        // split the found block into smaller sub-blocks if required
459        // and update the free-lists accordingly in remote cluster
460        while( current_order > order )
461        {
462        // update order and size
463                current_order --;
464                current_size >>= 1;
465
466        // update new free block order field in remote cluster
467                current_block = found_block + current_size;
468                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
469
470        // get local pointer on the root of the relevant free_list in remote cluster 
471        list_entry_t * root = &ppm->free_pages_root[current_order];
472
473        // insert new free block in this free_list
474                list_remote_add_first( cxy , root, &current_block->list );
475
476        // update free-list number of items in remote cluster
477        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
478        }
479
480        // update refcount, flags and order fields in found block
481        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
482        page_remote_refcount_up( XPTR( cxy , found_block ) );
483        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
484   
485        // release lock protecting free lists in remote cluster
486        remote_busylock_release( lock_xp );
487
488    // update DQDT page counter in remote cluster
489    dqdt_increment_pages( cxy , order );
490
491#if DEBUG_PPM_REMOTE_ALLOC_PAGES
492if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
493{
494    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
495    __FUNCTION__, this->process->pid, this->trdid, 
496    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
497    ppm_remote_display( cxy );
498}
499#endif
500
501        return found_block;
502
503}  // end ppm_remote_alloc_pages()
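/*
 * Usage sketch (not compiled, hypothetical helper): allocate one small page in a
 * possibly remote target cluster, build the extended pointer required to access it,
 * and release it with ppm_remote_free_pages() defined below. The target_cxy
 * argument is hypothetical.
 */
#if 0
static void ppm_remote_alloc_example( cxy_t target_cxy )
{
    // allocate one page (order 0) in the target cluster
    page_t * page = ppm_remote_alloc_pages( target_cxy , 0 );

    if( page != NULL )
    {
        // the returned pointer is only meaningful in the target cluster :
        // build extended pointers before accessing the descriptor or the buffer
        xptr_t page_xp = XPTR( target_cxy , page );
        xptr_t base_xp = ppm_page2base( page_xp );

        // ... access the remote buffer through base_xp with hal_remote_* primitives ...

        // release the page in the target cluster
        ppm_remote_free_pages( target_cxy , page );
    }
}
#endif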
504
505///////////////////////////////////////////////
506void ppm_remote_free_pages( cxy_t     page_cxy,
507                            page_t  * page_ptr )
508{
509    xptr_t     page_xp;          // extended pointer on released page descriptor
510        page_t   * buddy_ptr;        // searched buddy page descriptor
511    uint32_t   buddy_order;      // searched buddy page order
512        uint32_t   buddy_index;      // buddy page index in page_tbl[]
513        page_t   * current_ptr;      // current (merged) page descriptor
514        uint32_t   current_index;    // current (merged) page index in page_tbl[]
515        uint32_t   current_order;    // current (merged) page order
516
517#if DEBUG_PPM_REMOTE_FREE_PAGES
518thread_t * this  = CURRENT_THREAD;
519uint32_t   cycle = (uint32_t)hal_get_cycles();
520#endif
521
522#if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 )
523if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
524{
525    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
526    __FUNCTION__, this->process->pid, this->trdid, 
527    1<<hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) ), page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle );
528    ppm_remote_display( page_cxy );
529}
530#endif
531
532    // build extended pointer on released page descriptor
533    page_xp = XPTR( page_cxy , page_ptr );
534   
535
536    // get local pointer on PPM (same in all clusters)
537        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
538
539    // build extended pointer on lock protecting remote PPM
540    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
541
542    // get local pointer on remote PPM page_tbl[] array
543        page_t * pages_tbl = hal_remote_lpt( XPTR( page_cxy , &ppm->pages_tbl ) );
544
545        // get lock protecting free_pages in remote cluster
546        remote_busylock_acquire( lock_xp );
547
548assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
549"page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
550
551assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
552"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
553
554        // set the FREE flag in released page descriptor
555        page_remote_set_flag( page_xp , PG_FREE );
556
557    // initialise loop variables
558    current_ptr   = page_ptr;
559    current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
560        current_index = page_ptr - pages_tbl;

    // save the released block order before it can be updated by buddy merging
    uint32_t order = current_order;
561
562        // search the buddy page descriptor
563        // - merge with current page descriptor if buddy found
564        // - exit to release the current page descriptor if buddy not found
565    while( current_order < CONFIG_PPM_MAX_ORDER )
566    {
567        // compute buddy page index and local pointer on page descriptor
568                buddy_index = current_index ^ (1 << current_order);
569                buddy_ptr   = pages_tbl + buddy_index;
570       
        // get buddy page order from the remote page descriptor
        buddy_order = hal_remote_l32( XPTR( page_cxy , &buddy_ptr->order ) );

571        // exit loop if buddy not found in current free list
572                if( !page_remote_is_flag( XPTR( page_cxy , buddy_ptr ) , PG_FREE ) || 
573            (buddy_order != current_order) ) break;
574
575        // remove buddy page from its free list in remote cluster
576                list_remote_unlink( page_cxy , &buddy_ptr->list );
577        hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , -1 );
578
579        // reset order field in buddy page descriptor
580        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );
581
582                // compute next (merged) page index in page_tbl[]
583                current_index &= buddy_index;
584
585        // compute next (merged) page order
586        current_order++;
587
588        // compute next (merged) page descriptor
589        current_ptr = pages_tbl + current_index; 
590
591    }  // end loop on order
592
593        // update current (merged) page descriptor order field
594        current_ptr = pages_tbl + current_index;
595    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );
596
597        // insert current (merged) page into relevant free list
598        list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
599    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
600
601        // release lock protecting free_pages[] array
602        remote_busylock_release( lock_xp );
603
604    // update DQDT
605    dqdt_decrement_pages( page_cxy , order );
606
607#if DEBUG_PPM_REMOTE_FREE_PAGES
608if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
609{
610    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
611    __FUNCTION__, this->process->pid, this->trdid, 
612    1<<order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle );
613    ppm_remote_display( page_cxy );
614}
615#endif
616
617}  // end ppm_remote_free_pages()
618
619////////////////////////////////////
620void ppm_remote_display( cxy_t cxy )
621{
622        uint32_t       order;
623        list_entry_t * iter;
624    xptr_t         page_xp;
625
626    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
627
628    // get remote PPM general parameters
629    uint32_t   pages_nr   = hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) );
630    void     * vaddr_base = hal_remote_lpt( XPTR( cxy , &ppm->vaddr_base ) ); 
631    void     * pages_tbl  = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) ); 
632
633    // build extended pointer on lock protecting remote PPM
634    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
635
636    // get pointers on TXT0 chdev
637    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
638    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
639    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
640
641    // build extended pointer on remote TXT0 lock
642    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
643
644        // get PPM lock
645        remote_busylock_acquire( ppm_lock_xp );
646
647    // get TXT0 lock
648    remote_busylock_acquire( txt_lock_xp );
649
650        nolock_printk("\n***** PPM in cluster %x / %d pages / page_tbl %x / vaddr_base %x\n",
651    cxy, pages_nr, pages_tbl, vaddr_base );
652
653        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
654        {
655        // get number of free pages for free_list[order] in remote cluster
656        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
657
658        // display direct free_list[order]
659                nolock_printk("- forward  : order = %d / n = %d\t: ", order , n );
660                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
661                {
662            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
663                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
664                }
665                nolock_printk("\n");
666        }
667
668        // release TXT0 lock
669        remote_busylock_release( txt_lock_xp );
670
671        // release PPM lock
672        remote_busylock_release( ppm_lock_xp );
673}
674
675////////////////////////////////
676error_t ppm_assert_order( void )
677{
678        uint32_t       order;
679        list_entry_t * iter;
680        page_t       * page;
681
682    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
683
684        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
685        {
686                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
687
688                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
689                {
690                        page = LIST_ELEMENT( iter , page_t , list );
691                        if( page->order != order )  return -1;
692                }
693        }
694
695        return 0;
696}
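/*
 * Usage sketch (not compiled): ppm_assert_order() above returns 0 when every page
 * registered in a free list has an order field matching that list, and can be used
 * as a cheap consistency check from a debug path.
 */
#if 0
static void ppm_check_free_lists( void )
{
    if( ppm_assert_order() != 0 )
    {
        printk("\n[WARNING] %s : inconsistent PPM free lists in cluster %x\n",
        __FUNCTION__ , local_cxy );
    }
}
#endif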
697
698
699//////////////////////////////////////////////////////////////////////////////////////
700//     functions to handle  dirty physical pages
701//////////////////////////////////////////////////////////////////////////////////////
702
703//////////////////////////////////////////
704bool_t ppm_page_do_dirty( xptr_t page_xp )
705{
706        bool_t done = false;
707
708    // get page cluster and local pointer
709    page_t * page_ptr = GET_PTR( page_xp );
710    cxy_t    page_cxy = GET_CXY( page_xp );
711
712    // get local pointer on PPM (same in all clusters)
713        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
714
715    // build extended pointers on page lock, page flags, and PPM dirty list lock
716    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
717    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
718    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
719           
720        // lock the remote PPM dirty_list
721        remote_queuelock_acquire( dirty_lock_xp );
722
723    // lock the remote page
724    remote_busylock_acquire( page_lock_xp );
725
726    // get remote page flags
727    uint32_t flags = hal_remote_l32( page_flags_xp );
728
729        if( (flags & PG_DIRTY) == 0 )
730        {
731                // set dirty flag in page descriptor
732        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
733
734                // insert the page in the remote dirty list
735        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
736
737                done = true;
738        }
739
740    // unlock the remote page
741    remote_busylock_release( page_lock_xp );
742
743        // unlock the remote PPM dirty_list
744        remote_queuelock_release( dirty_lock_xp );
745
746        return done;
747
748} // end ppm_page_do_dirty()
749
750////////////////////////////////////////////
751bool_t ppm_page_undo_dirty( xptr_t page_xp )
752{
753        bool_t done = false;
754
755    // get page cluster and local pointer
756    page_t * page_ptr = GET_PTR( page_xp );
757    cxy_t    page_cxy = GET_CXY( page_xp );
758
759    // get local pointer on PPM (same in all clusters)
760        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
761
762    // build extended pointers on page lock, page flags, and PPM dirty list lock
763    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
764    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
765    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
766           
767        // lock the remote PPM dirty_list
768        remote_queuelock_acquire( dirty_lock_xp );
769
770    // lock the remote page
771    remote_busylock_acquire( page_lock_xp );
772
773    // get remote page flags
774    uint32_t flags = hal_remote_l32( page_flags_xp );
775
776        if( (flags & PG_DIRTY) )  // page is dirty
777        {
778                // reset dirty flag in page descriptor
779        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
780
781        // remove the page from remote dirty list
782        list_remote_unlink( page_cxy , &page_ptr->list );
783
784                done = true;
785        }
786
787    // unlock the remote page
788    remote_busylock_release( page_lock_xp );
789
790        // unlock the remote PPM dirty_list
791        remote_queuelock_release( dirty_lock_xp );
792
793        return done;
794
795}  // end ppm_page_undo_dirty()
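/*
 * Usage sketch (not compiled, hypothetical helper): a mapper page modified by a
 * write access is registered in the dirty list of its cluster with
 * ppm_page_do_dirty(), and removed with ppm_page_undo_dirty() once it has been
 * copied back to the backing device.
 */
#if 0
static void ppm_dirty_example( xptr_t page_xp )
{
    // register the page in the dirty list of its cluster
    // (returns false if the page was already dirty)
    bool_t first_write = ppm_page_do_dirty( page_xp );

    // ... the page is later synchronized on the backing device ...

    // remove the page from the dirty list
    // (returns false if the page was not dirty)
    if( first_write ) ppm_page_undo_dirty( page_xp );
}
#endif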
796
797/////////////////////////////////
798void ppm_sync_dirty_pages( void )
799{
800        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
801
802    // get local pointer on PPM dirty_root
803    list_entry_t * dirty_root = &ppm->dirty_root;
804
805    // build extended pointer on PPM dirty_lock
806    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
807
808        // get the PPM dirty_list lock
809        remote_queuelock_acquire( dirty_lock_xp );
810
811        while( !list_is_empty( &ppm->dirty_root ) )
812        {
813                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
814        xptr_t   page_xp = XPTR( local_cxy , page );
815
816        // build extended pointer on page lock
817        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
818
819                // get the page lock
820                remote_busylock_acquire( page_lock_xp );
821
822                // sync the page
823                vfs_fs_move_page( page_xp , false );  // from mapper to device
824
825                // release the page lock
826                remote_busylock_release( page_lock_xp );
827        }
828
829        // release the PPM dirty_list lock
830        remote_queuelock_release( dirty_lock_xp );
831
832}  // end ppm_sync_dirty_pages()
833