source: trunk/kernel/mm/khm.c @ 440

Last change on this file since 440 was 315, checked in by alain, 7 years ago

Redefine the functions ppm_base2page() / ppm_page2base() / ppm_page2ppn() / ppm_ppn2page() / ppm_base2ppn() / ppm_ppn2base(),
to explicitly use extended pointers.

File size: 4.3 KB
/*
 * khm.c - kernel heap manager implementation.
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <spinlock.h>
#include <bits.h>
#include <printk.h>
#include <thread.h>
#include <cluster.h>
#include <page.h>
#include <ppm.h>
#include <khm.h>


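///////////////////////////////////////////////////////////////////////////////
// Implementation note (added for readability, deduced from the code below):
// the kernel heap is one physically contiguous region of
// (1 << CONFIG_PPM_HEAP_ORDER) pages, obtained from the local PPM at
// initialisation, and handled as a linear sequence of variable-size blocks.
// Each block starts with a khm_block_t descriptor (defined in khm.h), of which
// only the <size> and <busy> fields are used here, immediately followed by the
// payload returned to the caller. Allocation is a first-fit scan starting from
// the <next> hint, splitting the selected free block when it is large enough.
///////////////////////////////////////////////////////////////////////////////
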
////////////////////////////
void khm_init( khm_t * khm )
{
        // check config parameters
        assert( ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) , __FUNCTION__ ,
                 "CONFIG_PPM_HEAP_ORDER too large" );

        // initialize lock
        spinlock_init( &khm->lock );

        // compute kernel heap size
        intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;
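        // (added note) with hypothetical values CONFIG_PPM_PAGE_SHIFT = 12
        // (4 Kbytes pages) and CONFIG_PPM_HEAP_ORDER = 10, this gives
        // heap_size = 1024 * 4096 bytes = 4 Mbytes.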

        // get kernel heap base from PPM
        page_t * page      = ppm_alloc_pages( CONFIG_PPM_HEAP_ORDER );
        xptr_t   base_xp   = ppm_page2base( XPTR( local_cxy, page ) );
        void   * heap_base = GET_PTR( base_xp );

        // initialize first block (complete heap)
        khm_block_t * block = (khm_block_t *)heap_base;
        block->size = heap_size;
        block->busy = 0;

        // initialize KHM fields
        khm->base    = (intptr_t)heap_base;
        khm->size    = heap_size;
        khm->next    = (intptr_t)heap_base;
}

/////////////////////////////////
void * khm_alloc( khm_t    * khm,
                  uint32_t   size )
{
        khm_block_t  * current;
        khm_block_t  * next;
        uint32_t       effective_size;

        // compute actual block size
        effective_size = size + sizeof(khm_block_t);
        effective_size = ARROUND_UP( effective_size, CONFIG_CACHE_LINE_SIZE );

        // get lock protecting heap
        spinlock_lock( &khm->lock );

        // define a starting block to scan existing blocks
        if( ((khm_block_t*)khm->next)->size < effective_size ) current = (khm_block_t*)khm->base;
        else                                                   current = (khm_block_t*)khm->next;

        // scan all existing blocks to find a free block large enough
        while( current->busy || (current->size < effective_size))
        {
                // get next block pointer
                current = (khm_block_t*)((char*)current + current->size);

                if( (intptr_t)current >= (khm->base + khm->size) )  // heap full
                {
                        spinlock_unlock(&khm->lock);

                        printk("\n[ERROR] in %s : failed to allocate block of size %d\n",
                               __FUNCTION__ , effective_size );
                        return NULL;
                }
        }

        // split the current block if it is too large
        if( (current->size - effective_size) >= CONFIG_CACHE_LINE_SIZE )
        {
                // update new free block features
                next           = (khm_block_t *)((char*)current + effective_size);
                next->size     = current->size - effective_size;
                next->busy     = 0;

                // register new free block
                khm->next = (intptr_t)next;

                // update allocated block features
                current->size  = effective_size;
                current->busy  = 1;
        }
        else
        {
                // change block state
                current->busy  = 1;
        }

        // release lock protecting heap
        spinlock_unlock( &khm->lock );

        return (char*)current + sizeof(khm_block_t);
}

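///////////////////////////////////////////////////////////////////////////////
// Usage sketch (added, hypothetical caller): the <my_data_t> type and the
// error handling are illustrative only; the khm instance is the cluster-local
// one, accessed as in khm_free() below.
//
//     khm_t     * khm = &LOCAL_CLUSTER->khm;
//     my_data_t * ptr = khm_alloc( khm , sizeof(my_data_t) );
//     if( ptr == NULL ) printk("\n[ERROR] cannot allocate my_data_t\n");
//     ...
//     khm_free( ptr );
///////////////////////////////////////////////////////////////////////////////
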
///////////////////////////
void khm_free( void * ptr )
{
        khm_t * khm = &LOCAL_CLUSTER->khm;

        khm_block_t * current;
        khm_block_t * next;

        if(ptr == NULL) return;

        current = (khm_block_t *)((char*)ptr - sizeof(khm_block_t));

        // get lock protecting heap
        spinlock_lock(&khm->lock);

        assert( (current->busy == 1) , __FUNCTION__ , "block already freed" );

        // release block
        current->busy = 0;

        // merge the released block with the following free blocks, if any
        while ( 1 )
        {
                next = (khm_block_t*)((char*)current + current->size);
                if ( ((intptr_t)next >= (khm->base + khm->size)) || (next->busy == 1) ) break;
                current->size += next->size;
        }

        if( (intptr_t)current < khm->next ) khm->next = (intptr_t)current;

        // release lock protecting heap
        spinlock_unlock( &khm->lock );
}

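///////////////////////////////////////////////////////////////////////////////
// Note (added): coalescing in khm_free() is forward only: the released block
// absorbs the free blocks that follow it in memory, but it is never merged
// into a free block located just before it. The <next> field is only a scan
// hint: it is lowered above so that the next first-fit scan in khm_alloc()
// starts from the lowest recently released address.
///////////////////////////////////////////////////////////////////////////////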