source: trunk/kernel/mm/ppm.c @ 606

Last change on this file since 606 was 606, checked in by alain, 5 years ago

Improve the FAT32 file system to support cat, rm, cp commands.

File size: 17.3 KB
/*
 * ppm.c - Per-cluster Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <printk.h>
#include <list.h>
#include <bits.h>
#include <page.h>
#include <dqdt.h>
#include <busylock.h>
#include <queuelock.h>
#include <thread.h>
#include <cluster.h>
#include <kmem.h>
#include <process.h>
#include <mapper.h>
#include <ppm.h>
#include <vfs.h>

////////////////////////////////////////////////////////////////////////////////////////
//     functions to translate [ page <-> base <-> ppn ]
////////////////////////////////////////////////////////////////////////////////////////

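// As implemented below, a physical page in a given cluster can be designated in
// three equivalent ways:
// - an extended pointer on its page descriptor in the local pages_tbl[] array,
// - an extended pointer on its base address in the kernel virtual space
//   (vaddr_base + (index << CONFIG_PPM_PAGE_SHIFT)),
// - its physical page number (ppn), i.e. the physical address (which encodes the
//   cluster identifier) shifted right by CONFIG_PPM_PAGE_SHIFT.
// The conversion functions only access the local PPM descriptor, since the PPM
// structure is the same in all clusters.
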
////////////////////////////////////////////////
inline bool_t ppm_page_is_valid( page_t * page )
{
    ppm_t    * ppm  = &LOCAL_CLUSTER->ppm;
    uint32_t   pgnr = (uint32_t)( page - ppm->pages_tbl );

    // a valid page descriptor index is in [0 , pages_nr[
    return (pgnr < ppm->pages_nr);
}

/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base +
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

    return XPTR( page_cxy , base_ptr );

} // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
    ppm_t  * ppm = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    page_t * page_ptr = ppm->pages_tbl +
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);

    return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()

///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );

}  // end ppm_ppn2page()

///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_base2ppn()

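// Illustrative round trip (a sketch, assuming "page" is a valid local page
// descriptor; these identities follow from the conversion functions above):
//
//   xptr_t page_xp = XPTR( local_cxy , page );
//   ppn_t  ppn     = ppm_page2ppn( page_xp );
//   // ppm_ppn2page( ppn ) returns page_xp again,
//   // and ppm_base2page( ppm_page2base( page_xp ) ) also returns page_xp.
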
////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

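// The allocator below implements a classical binary buddy algorithm:
// ppm->free_pages_root[k] is the root of the local list of free blocks of
// 2^k contiguous pages, and ppm->free_pages_nr[k] counts the blocks in that
// list. Both arrays are protected by the ppm->free_lock busylock, and
// ppm_free_pages_nolock() must therefore be called with this lock held.
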
///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
    page_t   * buddy;            // searched buddy page descriptor
    uint32_t   buddy_index;      // buddy page index
    page_t   * current;          // current (merged) page descriptor
    uint32_t   current_index;    // current (merged) page index
    uint32_t   current_order;    // current (merged) page order

    ppm_t    * ppm       = &LOCAL_CLUSTER->ppm;
    page_t   * pages_tbl = ppm->pages_tbl;

    assert( !page_is_flag( page , PG_FREE ) ,
    "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    assert( !page_is_flag( page , PG_RESERVED ) ,
    "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    // update released page descriptor flags
    page_set_flag( page , PG_FREE );

    // search the buddy page descriptor at each order:
    // the buddy of a block of order k starting at index i starts at index (i ^ (1 << k)).
    // - if the buddy is free and has the same order, merge it with the current block
    //   and try again at the next order,
    // - otherwise exit the loop and release the current (possibly merged) block.
    current       = page;
    current_index = (uint32_t)(page - ppm->pages_tbl);
    for( current_order = page->order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy       = pages_tbl + buddy_index;

        if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;

        // remove buddy from free list
        list_unlink( &buddy->list );
        ppm->free_pages_nr[current_order] --;

        // merge buddy with current : the merged block starts at the lower index
        buddy->order = 0;
        current_index &= buddy_index;
    }

    // update merged page descriptor order
    current        = pages_tbl + current_index;
    current->order = current_order;

    // insert current in free list
    list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()
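
// Worked example (a sketch, with hypothetical indexes): releasing the order-0
// block at index 6 when the order-0 block at index 7 is already free first
// merges them into an order-1 block at index 6 (7 = 6 ^ (1<<0), 6 = 6 & 7);
// if the order-1 block at index 4 is also free, a second merge produces an
// order-2 block at index 4, which is then inserted in free_pages_root[2].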

////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
    uint32_t   current_order;
    page_t   * remaining_block;
    uint32_t   current_size;

#if DEBUG_PPM_ALLOC_PAGES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_print();
#endif

    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

    assert( (order < CONFIG_PPM_MAX_ORDER) ,
    "illegal order argument = %x\n" , order );

    page_t * block = NULL;

    // take lock protecting free lists
    busylock_acquire( &ppm->free_lock );

    // find a free block equal to or larger than the requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    {
        if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
        {
            block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
            list_unlink( &block->list );
            break;
        }
    }

    if( block == NULL ) // return failure
    {
        // release lock protecting free lists
        busylock_release( &ppm->free_lock );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
#endif

        return NULL;
    }

    // update free lists after removing a block
    ppm->free_pages_nr[current_order] --;
    current_size = (1 << current_order);

    // split the removed block into smaller sub-blocks if required,
    // and update the free lists accordingly
    while( current_order > order )
    {
        current_order --;
        current_size >>= 1;

        remaining_block = block + current_size;
        remaining_block->order = current_order;

        list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
        ppm->free_pages_nr[current_order] ++;
    }

    // update page descriptor
    page_clear_flag( block , PG_FREE );
    page_refcount_up( block );
    block->order = order;

    // release lock protecting free lists
    busylock_release( &ppm->free_lock );

    // update DQDT
    dqdt_increment_pages( order );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
#endif

    return block;

}  // end ppm_alloc_pages()
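
// Typical usage (a minimal sketch, not taken from the ALMOS-MKH sources):
// allocate one local physical page, get its base address in kernel space,
// then release it.
//
//   page_t * page = ppm_alloc_pages( 0 );            // one page (order 0)
//   if( page != NULL )
//   {
//       void * buf = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );
//       /* ... use buf ... */
//       ppm_free_pages( page );
//   }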

////////////////////////////////////
void ppm_free_pages( page_t * page )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // save the block order, as the buddy merge can modify the page descriptor
    uint32_t order = page->order;

#if DEBUG_PPM_FREE_PAGES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_print();
#endif

    // get lock protecting free_pages[] array
    busylock_acquire( &ppm->free_lock );

    ppm_free_pages_nolock( page );

    // release lock protecting free_pages[] array
    busylock_release( &ppm->free_lock );

    // update DQDT, using the order saved before the release
    dqdt_decrement_pages( order );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

}  // end ppm_free_pages()

//////////////////////
void ppm_print( void )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get lock protecting free lists
    busylock_acquire( &ppm->free_lock );

    printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        printk("- order = %d / free_pages = %d\t: ",
               order , ppm->free_pages_nr[order] );

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            printk("%x," , page - ppm->pages_tbl );
        }

        printk("\n");
    }

    // release lock protecting free lists
    busylock_release( &ppm->free_lock );
}

///////////////////////////////////////
error_t ppm_assert_order( ppm_t * ppm )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            if( page->order != order )  return -1;
        }
    }

    return 0;
}

//////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
//////////////////////////////////////////////////////////////////////////////////////

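// Each PPM maintains a local list of dirty pages (ppm->dirty_root), protected by
// the ppm->dirty_lock remote queuelock. A page is registered in this list by
// ppm_page_do_dirty(), removed by ppm_page_undo_dirty(), and all registered pages
// are copied back from the mapper to the backing device by ppm_sync_dirty_pages().
// The first two functions take an extended pointer, because the dirty page can be
// remote, but the list used is always the one in the cluster containing the page.
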
//////////////////////////////////////////
bool_t ppm_page_do_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) == 0 )
    {
        // set dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

        // The PPM dirty list is a local list, but the dirty page can be remote.
        // Inserting a page requires updating four pointers: we can use the
        // standard LIST API when the page is local, but we must use explicit
        // remote accesses when it is not.

        if( page_cxy == local_cxy )         // locally update the PPM dirty list
        {
            list_add_first( &ppm->dirty_root , &page_ptr->list );
        }
        else                                // remotely update the PPM dirty list
        {
            // get local and remote pointers on "root" list entry
            list_entry_t * root    = &ppm->dirty_root;
            xptr_t         root_xp = XPTR( page_cxy , root );

            // get local and remote pointers on "page" list entry
            list_entry_t * list    = &page_ptr->list;
            xptr_t         list_xp = XPTR( page_cxy , list );

            // get local and remote pointers on first dirty page
            list_entry_t * dirt    = hal_remote_lpt( XPTR( page_cxy, &root->next ) );
            xptr_t         dirt_xp = XPTR( page_cxy , dirt );

            // set root.next, list.next, list.pred, and dirt.pred in remote cluster
            hal_remote_spt( root_xp                    , list );
            hal_remote_spt( list_xp                    , dirt );
            hal_remote_spt( list_xp + sizeof(intptr_t) , root );
            hal_remote_spt( dirt_xp + sizeof(intptr_t) , list );
        }

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

} // end ppm_page_do_dirty()
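
// Note on the remote accesses above: they assume the list_entry_t layout defined
// in list.h, i.e. the "next" pointer at offset 0 and the "pred" pointer at offset
// sizeof(intptr_t), which is why the pred field is addressed as
// (entry_xp + sizeof(intptr_t)). The same convention is used by
// ppm_page_undo_dirty() below.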

////////////////////////////////////////////
bool_t ppm_page_undo_dirty( xptr_t page_xp )
{
    bool_t done = false;

    // get page cluster and local pointer
    page_t * page_ptr = GET_PTR( page_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );

    // get local pointer on PPM (same in all clusters)
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // build extended pointers on page lock, page flags, and PPM dirty list lock
    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );

    // lock the remote PPM dirty_list
    remote_queuelock_acquire( dirty_lock_xp );

    // lock the remote page
    remote_busylock_acquire( page_lock_xp );

    // get remote page flags
    uint32_t flags = hal_remote_l32( page_flags_xp );

    if( (flags & PG_DIRTY) )  // page is dirty
    {
        // reset dirty flag in page descriptor
        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

        // The PPM dirty list is a local list, but the dirty page can be remote.
        // Removing a page requires updating four pointers: we can use the
        // standard LIST API when the page is local, but we must use explicit
        // remote accesses when it is not.

        if( page_cxy == local_cxy )         // locally update the PPM dirty list
        {
            list_unlink( &page_ptr->list );
        }
        else                                // remotely update the PPM dirty list
        {
            // get local and remote pointers on "page" list entry
            list_entry_t * list    = &page_ptr->list;
            xptr_t         list_xp = XPTR( page_cxy , list );

            // get local and remote pointers on "next" page list entry
            list_entry_t * next    = hal_remote_lpt( list_xp );
            xptr_t         next_xp = XPTR( page_cxy , next );

            // get local and remote pointers on "pred" page list entry
            list_entry_t * pred    = hal_remote_lpt( list_xp + sizeof(intptr_t) );
            xptr_t         pred_xp = XPTR( page_cxy , pred );

            // set pred.next, list.next, list.pred, and next.pred in remote cluster
            hal_remote_spt( pred_xp                    , next );
            hal_remote_spt( list_xp                    , NULL );
            hal_remote_spt( list_xp + sizeof(intptr_t) , NULL );
            hal_remote_spt( next_xp + sizeof(intptr_t) , pred );
        }

        done = true;
    }

    // unlock the remote page
    remote_busylock_release( page_lock_xp );

    // unlock the remote PPM dirty_list
    remote_queuelock_release( dirty_lock_xp );

    return done;

}  // end ppm_page_undo_dirty()

/////////////////////////////////
void ppm_sync_dirty_pages( void )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get local pointer on PPM dirty_root
    list_entry_t * dirty_root = &ppm->dirty_root;

    // build extended pointer on PPM dirty_lock
    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

    // get the PPM dirty_list lock
    remote_queuelock_acquire( dirty_lock_xp );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page_t * page    = LIST_FIRST( dirty_root , page_t , list );
        xptr_t   page_xp = XPTR( local_cxy , page );

        // build extended pointer on page lock
        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

        // get the page lock
        remote_busylock_acquire( page_lock_xp );

        // sync the page
        vfs_fs_move_page( page_xp , false );  // from mapper to device

        // release the page lock
        remote_busylock_release( page_lock_xp );
    }

    // release the PPM dirty_list lock
    remote_queuelock_release( dirty_lock_xp );

}  // end ppm_sync_dirty_pages()