source: trunk/kernel/mm/ppm.c @ 582

Last change on this file since 582 was 567, checked in by alain, 6 years ago

Complete restructuring of kernel locks.

File size: 12.6 KB
/*
 * ppm.c - Per-cluster Physical Pages Manager implementation
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner    (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24
25#include <kernel_config.h>
26#include <hal_kernel_types.h>
27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
32#include <busylock.h>
33#include <queuelock.h>
34#include <thread.h>
35#include <cluster.h>
36#include <kmem.h>
37#include <process.h>
38#include <mapper.h>
39#include <ppm.h>
40
41////////////////////////////////////////////////////////////////////////////////////////
42//     functions to  translate [ page <-> base <-> ppn ]
43////////////////////////////////////////////////////////////////////////////////////////
44
////////////////////////////////////////////////
inline bool_t ppm_page_is_valid( page_t * page )
{
    ppm_t    * ppm  = &LOCAL_CLUSTER->ppm;
    uint32_t   pgnr = (uint32_t)( page - ppm->pages_tbl );

    // valid indexes in pages_tbl[] are 0 to (pages_nr - 1)
    return (pgnr < ppm->pages_nr);
}

/////////////////////////////////////////////
inline xptr_t ppm_page2base( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    void   * base_ptr = ppm->vaddr_base +
                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

    return XPTR( page_cxy , base_ptr );

}  // end ppm_page2base()

/////////////////////////////////////////////
inline xptr_t ppm_base2page( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    page_t * page_ptr = ppm->pages_tbl +
                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);

    return XPTR( base_cxy , page_ptr );

}  // end ppm_base2page()

///////////////////////////////////////////
inline ppn_t ppm_page2ppn( xptr_t page_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_page2ppn()

///////////////////////////////////////
inline xptr_t ppm_ppn2page( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );

}  // end ppm_ppn2page()

///////////////////////////////////////
inline xptr_t ppm_ppn2base( ppn_t ppn )
{
    ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;

    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;

    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    lpa_t    lpa   = LPA_FROM_PADDR( paddr );

    return XPTR( cxy , (void *)ppm->vaddr_base + lpa );

}  // end ppm_ppn2base()

///////////////////////////////////////////
inline ppn_t ppm_base2ppn( xptr_t base_xp )
{
    ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;

    cxy_t    base_cxy = GET_CXY( base_xp );
    void   * base_ptr = GET_PTR( base_xp );

    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );

    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);

}  // end ppm_base2ppn()

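////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : a typical round
// trip through the translation functions above, going from a local page descriptor to
// its PPN and back to the page base address. The helper name is hypothetical, and it
// assumes the page descriptor belongs to the local cluster.
////////////////////////////////////////////////////////////////////////////////////////
static inline xptr_t ppm_example_page2base_via_ppn( page_t * page )
{
    // build an extended pointer on the local page descriptor
    xptr_t page_xp = XPTR( local_cxy , page );

    // page descriptor -> physical page number
    ppn_t  ppn     = ppm_page2ppn( page_xp );

    // physical page number -> extended pointer on the page base
    // (equivalent to calling ppm_page2base( page_xp ) directly)
    return ppm_ppn2base( ppn );
}
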
////////////////////////////////////////////////////////////////////////////////////////
//     functions to allocate / release physical pages
////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////
void ppm_free_pages_nolock( page_t * page )
{
    page_t   * buddy;            // searched buddy page descriptor
    uint32_t   buddy_index;      // buddy page index
    page_t   * current;          // current (merged) page descriptor
    uint32_t   current_index;    // current (merged) page index
    uint32_t   current_order;    // current (merged) page order

    ppm_t    * ppm       = &LOCAL_CLUSTER->ppm;
    page_t   * pages_tbl = ppm->pages_tbl;

    assert( !page_is_flag( page , PG_FREE ) ,
    "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    assert( !page_is_flag( page , PG_RESERVED ) ,
    "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

    // update released page descriptor flags
    page_set_flag( page , PG_FREE );

    // search the buddy page descriptor
    // - merge with current page descriptor if found
    // - exit to release the current page descriptor if not found
    current       = page;
    current_index = (uint32_t)(page - ppm->pages_tbl);
    for( current_order = page->order ;
         current_order < CONFIG_PPM_MAX_ORDER ;
         current_order++ )
    {
        buddy_index = current_index ^ (1 << current_order);
        buddy       = pages_tbl + buddy_index;

        if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;

        // remove buddy from free list
        list_unlink( &buddy->list );
        ppm->free_pages_nr[current_order] --;

        // merge buddy with current
        buddy->order = 0;
        current_index &= buddy_index;
    }

    // update merged page descriptor order
    current        = pages_tbl + current_index;
    current->order = current_order;

    // insert current in free list
    list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    ppm->free_pages_nr[current_order] ++;

}  // end ppm_free_pages_nolock()
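
////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : the buddy index
// computation used by ppm_free_pages_nolock() above. For a block starting at page
// index 8 with order 2, the buddy block starts at index 8 ^ (1 << 2) = 12, and the
// merged block of order 3 starts at index 8 & 12 = 8. The helper name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t ppm_example_buddy_index( uint32_t block_index,
                                                uint32_t order )
{
    // the buddy of a block of size 2^order is obtained by flipping bit <order>
    // of the block index
    return block_index ^ (1 << order);
}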

////////////////////////////////////////////
page_t * ppm_alloc_pages( uint32_t   order )
{
    uint32_t   current_order;
    page_t   * remaining_block;
    uint32_t   current_size;

#if DEBUG_PPM_ALLOC_PAGES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
#endif

#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
if( DEBUG_PPM_ALLOC_PAGES < cycle )
ppm_print();
#endif

    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

    assert( (order < CONFIG_PPM_MAX_ORDER) ,
    "illegal order argument = %x\n" , order );

    page_t * block = NULL;

    // take lock protecting free lists
    busylock_acquire( &ppm->free_lock );

    // find a free block of size equal to or larger than the requested size
    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order++ )
    {
        if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
        {
            block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
            list_unlink( &block->list );
            break;
        }
    }

    if( block == NULL ) // return failure
    {
        // release lock protecting free lists
        busylock_release( &ppm->free_lock );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
#endif

        return NULL;
    }

    // update free-lists after removing a block
    ppm->free_pages_nr[current_order] --;
    current_size = (1 << current_order);

    // split the removed block in smaller sub-blocks if required
    // and update the free-lists accordingly
    while( current_order > order )
    {
        current_order --;
        current_size >>= 1;

        remaining_block = block + current_size;
        remaining_block->order = current_order;

        list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
        ppm->free_pages_nr[current_order] ++;
    }

    // update page descriptor
    page_clear_flag( block , PG_FREE );
    page_refcount_up( block );
    block->order = order;

    // release lock protecting free lists
    busylock_release( &ppm->free_lock );

#if DEBUG_PPM_ALLOC_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_ALLOC_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
#endif

    return block;

}  // end ppm_alloc_pages()

////////////////////////////////////
void ppm_free_pages( page_t * page )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

#if DEBUG_PPM_FREE_PAGES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

#if(DEBUG_PPM_FREE_PAGES & 0x1)
if( DEBUG_PPM_FREE_PAGES < cycle )
ppm_print();
#endif

    // get lock protecting free_pages[] array
    busylock_acquire( &ppm->free_lock );

    ppm_free_pages_nolock( page );

    // release lock protecting free_pages[] array
    busylock_release( &ppm->free_lock );

#if DEBUG_PPM_FREE_PAGES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PPM_FREE_PAGES < cycle )
printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
#endif

}  // end ppm_free_pages()
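
////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : typical local use
// of the two functions above, allocating a block of 2^2 = 4 contiguous physical pages,
// computing its base address in the local kernel virtual space, and releasing it.
// The function name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////
static inline void ppm_example_alloc_and_free( void )
{
    // allocate 4 contiguous physical pages in the local cluster
    page_t * page = ppm_alloc_pages( 2 );

    if( page != NULL )
    {
        // get a local pointer on the block base
        uint8_t * base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );

        // the 4-page buffer can be used here : touch the first byte as a placeholder
        base[0] = 0;

        // release the block
        ppm_free_pages( page );
    }
}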

//////////////////////
void ppm_print( void )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // get lock protecting free lists
    busylock_acquire( &ppm->free_lock );

    printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        printk("- order = %d / free_pages = %d\t: ",
               order , ppm->free_pages_nr[order] );

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            printk("%x," , page - ppm->pages_tbl );
        }

        printk("\n");
    }

    // release lock protecting free lists
    busylock_release( &ppm->free_lock );
}

///////////////////////////////////////
error_t ppm_assert_order( ppm_t * ppm )
{
    uint32_t       order;
    list_entry_t * iter;
    page_t       * page;

    for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;

        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
        {
            page = LIST_ELEMENT( iter , page_t , list );
            if( page->order != order )  return -1;
        }
    }

    return 0;
}
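
////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : ppm_assert_order()
// can be used as a consistency check on the local free lists, for instance from a
// debug path. The function name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////
static inline void ppm_example_check_free_lists( void )
{
    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // verify that every page registered in free_pages_root[order] has the same order
    if( ppm_assert_order( ppm ) != 0 )
    {
        printk("\n[WARNING] inconsistent PPM free lists in cluster %x\n", local_cxy );
    }
}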


//////////////////////////////////////////////////////////////////////////////////////
//     functions to handle dirty physical pages
//////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////
bool_t ppm_page_do_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    queuelock_acquire( &ppm->dirty_lock );

    if( !page_is_flag( page , PG_DIRTY ) )
    {
        // set dirty flag in page descriptor
        page_set_flag( page , PG_DIRTY );

        // register page in PPM dirty list
        list_add_first( &ppm->dirty_root , &page->list );
        done = true;
    }

    // unlock the PPM dirty_list
    queuelock_release( &ppm->dirty_lock );

    return done;
}

///////////////////////////////////////////
bool_t ppm_page_undo_dirty( page_t * page )
{
    bool_t done = false;

    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

    // lock the PPM dirty_list
    queuelock_acquire( &ppm->dirty_lock );

    if( page_is_flag( page , PG_DIRTY ) )
    {
        // clear dirty flag in page descriptor
        page_clear_flag( page , PG_DIRTY );

        // remove page from PPM dirty list
        list_unlink( &page->list );
        done = true;
    }

    // unlock the PPM dirty_list
    queuelock_release( &ppm->dirty_lock );

    return done;
}

///////////////////////////////
void ppm_sync_all_pages( void )
{
    page_t   * page;
    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;

    // get the PPM dirty_list lock
    queuelock_acquire( &ppm->dirty_lock );

    while( !list_is_empty( &ppm->dirty_root ) )
    {
        page = LIST_FIRST( &ppm->dirty_root , page_t , list );

        // get the page lock
        remote_busylock_acquire( XPTR( local_cxy , &page->lock ) );

        // sync the page
        vfs_mapper_move_page( page , false );  // from mapper

        // release the page lock
        remote_busylock_release( XPTR( local_cxy , &page->lock ) );
    }

    // release the PPM dirty_list lock
    queuelock_release( &ppm->dirty_lock );
}
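
////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original ALMOS-MKH sources) : expected usage of
// the dirty page functions above. A writer marks a mapper page as dirty after modifying
// it; the page is later written back and unregistered when the dirty list is flushed.
// The function name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////////
static inline void ppm_example_mark_page_dirty( page_t * page )
{
    // register the page in the local PPM dirty list, if not already registered
    if( ppm_page_do_dirty( page ) )
    {
        // the page was newly registered : it will be handled by the next call
        // to ppm_sync_all_pages()
    }
}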