source: trunk/kernel/mm/ppm.c @ 690

Last change on this file since 690 was 683, checked in by alain, 3 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 25.9 KB
/*
 * ppm.c - Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <printk.h>
#include <list.h>
#include <bits.h>
#include <page.h>
#include <dqdt.h>
#include <busylock.h>
#include <queuelock.h>
#include <thread.h>
#include <cluster.h>
#include <kmem.h>
#include <process.h>
#include <mapper.h>
#include <ppm.h>
#include <vfs.h>

////////////////////////////////////////////////////////////////////////////////////////
//         global variables
////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c

////////////////////////////////////////////////////////////////////////////////////////
//     functions to translate [ page <-> base <-> ppn ]
////////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base + 
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER);

    return XPTR( page_cxy , base_ptr );

} // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    page_t * page_ptr = ppm->pages_tbl + 
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ORDER);

    return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()



///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ORDER] );

}  // end ppm_ppn2page()



///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);

}  // end ppm_base2ppn()

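/* Usage sketch (illustrative only, not part of the build) : the three views of a
 * physical page - PPN, page descriptor, base address - can be converted back and
 * forth with the functions above. The ppn value and variable names below are
 * invented for the example; all ppm_* functions exist in this file.
 *
 *     ppn_t   ppn     = 0x1200;                      // some physical page number
 *     xptr_t  page_xp = ppm_ppn2page( ppn );         // extended pointer on page descriptor
 *     xptr_t  base_xp = ppm_ppn2base( ppn );         // extended pointer on page base address
 *
 *     // the reverse translations return the original PPN
 *     ppn_t   from_page = ppm_page2ppn( page_xp );   // == ppn
 *     ppn_t   from_base = ppm_base2ppn( base_xp );   // == ppn
 */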

////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
    page_t   * buddy;               // searched buddy page descriptor
    uint32_t   buddy_index;         // buddy page index in page_tbl[]
    page_t   * current_ptr;         // current (merged) page descriptor
    uint32_t   current_index;       // current (merged) page index in page_tbl[]
    uint32_t   current_order;       // current (merged) page order

    ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
    page_t   * pages_tbl   = ppm->pages_tbl;

assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) ,
"page already released : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );

assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) ,
"reserved page : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );

    // set FREE flag in released page descriptor
    page_set_flag( page , PG_FREE );

    // initialise loop variables
    current_ptr   = page;
    current_order = page->order;
    current_index = page - ppm->pages_tbl;

    // search the buddy page descriptor
    // - merge with current page if buddy found
    // - exit to release the current page when buddy not found
    while( current_order < CONFIG_PPM_MAX_ORDER )
    {
        // compute buddy page index and page descriptor
        buddy_index = current_index ^ (1 << current_order);
        buddy       = pages_tbl + buddy_index;

        // exit loop if buddy not found in current free list
        if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;

        // remove buddy page from current free_list
        list_unlink( &buddy->list );
        ppm->free_pages_nr[current_order] --;

        // reset order field in buddy page descriptor
        buddy->order = 0;

        // compute next values for loop variables
        current_index &= buddy_index;
        current_order++;
        current_ptr = pages_tbl + current_index;
    }

    // update order field for merged page descriptor
    current_ptr->order = current_order;

    // insert merged page in relevant free list
    list_add_first( &ppm->free_pages_root[current_order] , &current_ptr->list );
    ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()

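/* Worked example of the buddy arithmetic used above (illustrative values) :
 * for a released block of order 2 located at index 12 (0b1100) in page_tbl[],
 * the buddy index is 12 ^ (1 << 2) = 8 (0b1000). If that buddy is free with the
 * same order, the two blocks merge into a single block of order 3 placed at
 * index 12 & 8 = 8, i.e. the lower of the two indexes, and the search restarts
 * at order 3 with buddy index 8 ^ (1 << 3) = 0.
 */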
////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
    page_t   * current_block;
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * found_block;

    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_PPM_ALLOC_PAGES
if( DEBUG_PPM_ALLOC_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
}
#endif

// check order
assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
"illegal order argument = %d" , order );

    // build extended pointer on lock protecting the local PPM
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // take lock protecting free lists
    remote_busylock_acquire( lock_xp );

    current_block = NULL;
    current_order = order;

    // search a free block equal or larger than requested size
    while( current_order < CONFIG_PPM_MAX_ORDER )
    {
        // get local pointer on the root of relevant free_list (same in all clusters)
        list_entry_t * root = &ppm->free_pages_root[current_order];

        if( !list_is_empty( root ) )
        {
            // get first free block in this free_list
            current_block = LIST_FIRST( root , page_t , list );

            // remove this block from this free_list
            list_unlink( &current_block->list );
            ppm->free_pages_nr[current_order] --;

            // register pointer on found block
            found_block = current_block;

            // compute found block size
            current_size = (1 << current_order);

            break;
        }

        // increment loop index
        current_order++;
    }

    if( current_block == NULL ) // return failure if no free block found
    {

#if DEBUG_PPM_ERROR
printk("\n[ERROR] in %s thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
#endif
        // release lock protecting free lists
        remote_busylock_release( lock_xp );
        return NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly
    while( current_order > order )
    {
        // update size and order
        current_order --;
        current_size >>= 1;

        // update order field in new free block
        current_block = found_block + current_size;
        current_block->order = current_order;

        // insert new free block in relevant free_list
        list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
        ppm->free_pages_nr[current_order] ++;
    }

    // update found block page descriptor
    page_clear_flag( found_block , PG_FREE );
    page_refcount_up( found_block );
    found_block->order = order;

    // release lock protecting free lists
    remote_busylock_release( lock_xp );

    // update DQDT
    dqdt_increment_pages( local_cxy , order );

    hal_fence();

#if DEBUG_PPM_ALLOC_PAGES
if( DEBUG_PPM_ALLOC_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 
    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
}
#endif

    return found_block;

}  // end ppm_alloc_pages()

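/* Usage sketch (illustrative only, not part of the build) : allocating and then
 * releasing a block of 4 contiguous pages in the local cluster. The variable
 * names are invented for the example; all functions exist in this file.
 *
 *     page_t * page = ppm_alloc_pages( 2 );                       // 1<<2 = 4 pages
 *     if( page != NULL )
 *     {
 *         xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
 *         // ... use the buffer identified by base_xp ...
 *         ppm_free_pages( page );                                 // return block to buddy allocator
 *     }
 */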
////////////////////////////////////
void ppm_free_pages( page_t * page )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get released block order before the release, because the buddy merge can modify it
    uint32_t order = page->order;

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_PPM_FREE_PAGES
if( DEBUG_PPM_FREE_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 
    1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
}
#endif

    // build extended pointer on lock protecting free_lists
    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

    // get lock protecting free_pages[] array
    remote_busylock_acquire( lock_xp );

    ppm_free_pages_nolock( page );

    // release lock protecting free_lists
    remote_busylock_release( lock_xp );

    // update DQDT, using the order read before the release
    dqdt_decrement_pages( local_cxy , order );

    hal_fence();

#if DEBUG_PPM_FREE_PAGES
if( DEBUG_PPM_FREE_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 
    1<<order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
}
#endif

}  // end ppm_free_pages()


/////////////////////////////////////////////
xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
                               uint32_t  order )
{
    uint32_t   current_order;
    uint32_t   current_size;
    page_t   * current_block;
    page_t   * found_block;

// check order
assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
"illegal order argument = %d" , order );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_PPM_ALLOC_PAGES
if( DEBUG_PPM_ALLOC_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
}
#endif

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );

    // take lock protecting free lists in remote cluster
    remote_busylock_acquire( lock_xp );

    current_block = NULL;
    current_order = order;

    // search a free block equal or larger than requested size
    while( current_order < CONFIG_PPM_MAX_ORDER )
    {
        // get local pointer on the root of relevant free_list (same in all clusters)
        list_entry_t * root = &ppm->free_pages_root[current_order];

        if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
        {
            // get local pointer on first free page descriptor in remote cluster
            current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );

            // remove first free page from the free-list in remote cluster
            list_remote_unlink( cxy , &current_block->list );
            hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );

            // register found block
            found_block = current_block;

            // compute found block size
            current_size = (1 << current_order);

            break;
        }

        // increment loop index
        current_order++;
    }

    if( current_block == NULL ) // return failure
    {

#if DEBUG_PPM_ERROR
printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
#endif
        // release lock protecting free lists
        remote_busylock_release( lock_xp );
        return XPTR_NULL;
    }

    // split the found block in smaller sub-blocks if required
    // and update the free-lists accordingly in remote cluster
    while( current_order > order )
    {
        // update order and size
        current_order --;
        current_size >>= 1;

        // update new free block order field in remote cluster
        current_block = found_block + current_size;
        hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );

        // get local pointer on the root of the relevant free_list in remote cluster
        list_entry_t * root = &ppm->free_pages_root[current_order];

        // insert new free block in this free_list
        list_remote_add_first( cxy , root, &current_block->list );

        // update free-list number of items in remote cluster
        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
    }

    // update refcount, flags and order fields in found block
    page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
    page_remote_refcount_up( XPTR( cxy , found_block ) );
    hal_remote_s32( XPTR( cxy , &found_block->order ) , order );

    // release lock protecting free lists in remote cluster
    remote_busylock_release( lock_xp );

    // update DQDT page counter in remote cluster
    dqdt_increment_pages( cxy , order );

    hal_fence();

#if DEBUG_PPM_ALLOC_PAGES
if( DEBUG_PPM_ALLOC_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 
    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
}
#endif

    return XPTR( cxy , found_block );

}  // end ppm_remote_alloc_pages()

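/* Usage sketch (illustrative only, not part of the build) : allocating one page
 * in a possibly remote cluster, then releasing it. The target_cxy value and
 * variable names are invented for the example; all functions exist in this file.
 *
 *     cxy_t  target_cxy = 0x11;                                      // hypothetical cluster
 *     xptr_t page_xp    = ppm_remote_alloc_pages( target_cxy , 0 );  // one single page
 *     if( page_xp != XPTR_NULL )
 *     {
 *         ppn_t ppn = ppm_page2ppn( page_xp );                       // physical page number
 *         // ... use the page ...
 *         ppm_remote_free_pages( GET_CXY( page_xp ) , GET_PTR( page_xp ) );
 *     }
 */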
///////////////////////////////////////////////
void ppm_remote_free_pages( cxy_t     page_cxy,
                            page_t  * page_ptr )
{
    xptr_t     page_xp;          // extended pointer on released page descriptor
    page_t   * buddy_ptr;        // searched buddy page descriptor
    uint32_t   buddy_order;      // searched buddy page order
    uint32_t   buddy_index;      // buddy page index in page_tbl[]
    page_t   * current_ptr;      // current (merged) page descriptor
    uint32_t   current_index;    // current (merged) page index in page_tbl[]
    uint32_t   current_order;    // current (merged) page order

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get released page order
    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );

#if DEBUG_PPM_FREE_PAGES
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
ppn_t      ppn   = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
#endif

#if DEBUG_PPM_FREE_PAGES
if( DEBUG_PPM_FREE_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
}
#endif

    // build extended pointer on released page descriptor
    page_xp = XPTR( page_cxy , page_ptr );

    // build extended pointer on lock protecting remote PPM
    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );

    // get local pointer on remote PPM page_tbl[] array
    page_t * pages_tbl = hal_remote_lpt( XPTR( page_cxy , &ppm->pages_tbl ) );

    // get lock protecting free_pages in remote cluster
    remote_busylock_acquire( lock_xp );

assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) ,
"page already released : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );

assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) ,
"reserved page : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );

    // set the FREE flag in released page descriptor
    page_remote_set_flag( page_xp , PG_FREE );

    // initialise loop variables
    current_ptr   = page_ptr;
    current_order = order;
    current_index = page_ptr - ppm->pages_tbl;

    // search the buddy page descriptor
    // - merge with current page descriptor if buddy found
    // - exit to release the current page descriptor if buddy not found
    while( current_order < CONFIG_PPM_MAX_ORDER )
    {
        // compute buddy page index and local pointer on page descriptor
        buddy_index = current_index ^ (1 << current_order);
        buddy_ptr   = pages_tbl + buddy_index;

        // get buddy order
        buddy_order = hal_remote_l32( XPTR( page_cxy , &buddy_ptr->order ) );

        // exit loop if buddy not found
        if( !page_remote_is_flag( XPTR( page_cxy , buddy_ptr ) , PG_FREE ) || 
            (buddy_order != current_order) ) break;

        // remove buddy page from its free list in remote cluster
        list_remote_unlink( page_cxy , &buddy_ptr->list );
        hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , -1 );

        // reset order field in buddy page descriptor
        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );

        // compute next values for loop variables
        current_index &= buddy_index;
        current_order++;
        current_ptr = pages_tbl + current_index;

    }  // end loop on order

    // update current (merged) page descriptor order field
    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );

    // insert current (merged) page into relevant free list
    list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], &current_ptr->list );
    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );

    // release lock protecting free_pages[] array
    remote_busylock_release( lock_xp );

    // update DQDT, using the order read before the release
    dqdt_decrement_pages( page_cxy , order );

    hal_fence();

#if DEBUG_PPM_FREE_PAGES
if( DEBUG_PPM_FREE_PAGES < cycle )
{
    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
}
#endif

}  // end ppm_remote_free_pages()

////////////////////////////////////
void ppm_remote_display( cxy_t cxy )
{
    uint32_t       order;
    list_entry_t * iter;
    xptr_t         page_xp;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get remote PPM general parameters
    uint32_t   pages_nr   = hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) );
    void     * vaddr_base = hal_remote_lpt( XPTR( cxy , &ppm->vaddr_base ) );
    void     * pages_tbl  = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );

    // build extended pointer on lock protecting remote PPM
    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // build extended pointer on remote TXT0 lock
    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get PPM lock
    remote_busylock_acquire( ppm_lock_xp );

    // get TXT0 lock
    remote_busylock_acquire( txt_lock_xp );

    nolock_printk("\n***** PPM in cluster %x / %d pages / page_tbl %x / vaddr_base %x\n",
    cxy, pages_nr, pages_tbl, vaddr_base );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        // get number of free pages for free_list[order] in remote cluster
        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );

        // display forward free_list[order]
        nolock_printk("- forward  : order = %d / n = %d : ", order , n );
        LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
        {
            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
            nolock_printk("%x," , ppm_page2ppn( page_xp ) );
        }
        nolock_printk("\n");

        // display backward free_list[order]
        nolock_printk("- backward : order = %d / n = %d : ", order , n );
        LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter )
        {
            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
            nolock_printk("%x," , ppm_page2ppn( page_xp ) );
        }
        nolock_printk("\n");
    }

    // release TXT0 lock
    remote_busylock_release( txt_lock_xp );

    // release PPM lock
    remote_busylock_release( ppm_lock_xp );
}

////////////////////////////////
error_t ppm_assert_order( void )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            if( page->order != order )  return -1;
        }
    }

    return 0;
}


//////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
//////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) == 0 )
    {
        // set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

        // insert the page in the remote dirty list
        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

} // end ppm_page_do_dirty()

////////////////////////////////////////////
bool_t ppm_page_undo_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) )  // page is dirty
    {
        // reset dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

        // remove the page from remote dirty list
        list_remote_unlink( page_cxy , &page_ptr->list );

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

}  // end ppm_page_undo_dirty()

/////////////////////////////////
void ppm_sync_dirty_pages( void )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

    // get the PPM dirty_list lock
    remote_queuelock_acquire( dirty_lock_xp );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page_t * page    = LIST_FIRST( dirty_root , page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

        // get the page lock
        remote_busylock_acquire( page_lock_xp );

        // sync the page
        vfs_fs_move_page( page_xp , false );  // from mapper to device

        // release the page lock
        remote_busylock_release( page_lock_xp );
    }

    // release the PPM dirty_list lock
    remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()

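/* Usage sketch (illustrative only, not part of the build) : the dirty-page protocol
 * implemented above. A writer calls ppm_page_do_dirty() after modifying a mapper
 * page, which registers the page in the dirty list of its cluster; a later call to
 * ppm_sync_dirty_pages() flushes every page registered in the local dirty list to
 * the backing device. The page_xp variable is invented for the example.
 *
 *     // after modifying the page content in the mapper
 *     if( ppm_page_do_dirty( page_xp ) )
 *     {
 *         // the page was not dirty before : it is now linked in its cluster dirty list
 *     }
 *
 *     // later, for instance when servicing a sync() system call in this cluster
 *     ppm_sync_dirty_pages();    // calls vfs_fs_move_page() for each dirty page
 */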