Changeset 20 for trunk/kernel/mm/khm.c
Timestamp: Jun 3, 2017, 6:34:20 PM
Files: 1 edited
trunk/kernel/mm/khm.c
r18 → r20

  void khm_init( khm_t * khm )
  {
      // check config parameters
      assert( ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) , __FUNCTION__ ,
              "CONFIG_PPM_HEAP_ORDER too large" );

      // initialize lock
      spinlock_init( &khm->lock );

      // compute kernel heap size
      intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;

      // get kernel heap base from PPM
      page_t * page = ppm_alloc_pages( CONFIG_PPM_HEAP_ORDER );
      void * heap_base = ppm_page2base( page );

-     // initializes first block == complete heap
+     // initialize first block (complete heap)
      khm_block_t * block = (khm_block_t *)heap_base;
      block->size = heap_size;
      block->busy = 0;

-     // initializes KHM fields
+     // initialize KHM fields
      khm->base = (intptr_t)heap_base;
      khm->size = heap_size;
  …
      uint32_t effective_size;

      // compute actual block size
      effective_size = size + sizeof(khm_block_t);
      effective_size = ARROUND_UP( effective_size, CONFIG_CACHE_LINE_SIZE );

      // get lock protecting heap
      spinlock_lock( &khm->lock );

      // define a starting block to scan existing blocks
      if( ((khm_block_t*)khm->next)->size < effective_size ) current = (khm_block_t*)khm->base;
      else current = (khm_block_t*)khm->next;

-     // scan all existing blocks to find a large enough free block
+     // scan all existing blocks to find a free block large enough
      while( current->busy || (current->size < effective_size))
      {
          // get next block pointer
          current = (khm_block_t*)((char*)current + current->size);
  …
      }

-     // split the current block if current block is too large
+     // split the current block if it is too large
      if( (current->size - effective_size) >= CONFIG_CACHE_LINE_SIZE )
      {
          // update new free block features
          next = (khm_block_t *)((char*)current + effective_size);
          next->size = current->size - effective_size;
          next->busy = 0;

          // register new free block
          khm->next = (intptr_t)next;

          // update allocated block features
          current->size = effective_size;
          current->busy = 1;
      }
      else
      {
          // change block state
          current->busy = 1;
      }

      // release lock protecting heap
      spinlock_unlock( &khm->lock );
  …
      current = (khm_block_t *)((char*)ptr - sizeof(khm_block_t));

      // get lock protecting heap
      spinlock_lock(&khm->lock);

      // release block
      current->busy = 0;

      // try to merge released block with the next
      while ( 1 )
      {
          next = (khm_block_t*)((char*)current + current->size);
          if ( ((intptr_t)next >= (khm->base + khm->size)) || (next->busy == 1) ) break;
          current->size += next->size;
  …
      if( (intptr_t)current < khm->next ) khm->next = (intptr_t)current;

      // release lock protecting heap
      spinlock_unlock( &khm->lock );
  }
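The wording fixes above do not change the allocator itself: khm_alloc() performs a first-fit scan of the heap, splitting a free block that is larger than needed, and khm_free() merges a released block with the free blocks that follow it. The sketch below is a minimal, stand-alone user-space model of that policy under assumed names and sizes (HEAP_SIZE, ALIGN, block_t, heap[], toy_*); it is not the kernel code, and it leaves out the spinlock protection and the khm->next search hint.

    /*
     * Minimal user-space sketch of the policy implemented by khm_alloc() and
     * khm_free(): first-fit search, block splitting, and forward coalescing.
     * This is NOT the kernel code. HEAP_SIZE, ALIGN, block_t, heap[] and the
     * toy_* names are illustrative assumptions standing in for the PPM-backed
     * heap, CONFIG_CACHE_LINE_SIZE and khm_block_t; the lock and the khm->next
     * search hint are deliberately omitted.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define HEAP_SIZE 4096                  /* assumed toy heap size          */
    #define ALIGN     64                    /* plays the cache-line-size role */

    typedef struct block_s                  /* header at the start of a block */
    {
        uint32_t size;                      /* total size, header included    */
        uint32_t busy;                      /* 0 = free, 1 = allocated        */
    } block_t;

    static uint8_t heap[HEAP_SIZE];

    static void toy_init( void )
    {
        block_t * first = (block_t *)heap;  /* one free block == whole heap   */
        first->size = HEAP_SIZE;
        first->busy = 0;
    }

    static void * toy_alloc( uint32_t size )
    {
        /* requested size + header, rounded up to the alignment */
        uint32_t effective = (uint32_t)((size + sizeof(block_t) + ALIGN - 1)
                                        & ~(size_t)(ALIGN - 1));

        block_t * cur = (block_t *)heap;
        while( (uint8_t *)cur < heap + HEAP_SIZE )
        {
            if( (cur->busy == 0) && (cur->size >= effective) )
            {
                /* split when the remainder can hold another block */
                if( (cur->size - effective) >= ALIGN )
                {
                    block_t * next = (block_t *)((uint8_t *)cur + effective);
                    next->size = cur->size - effective;
                    next->busy = 0;
                    cur->size  = effective;
                }
                cur->busy = 1;
                return (uint8_t *)cur + sizeof(block_t);
            }
            cur = (block_t *)((uint8_t *)cur + cur->size);   /* next block    */
        }
        return NULL;                        /* no free block large enough     */
    }

    static void toy_free( void * ptr )
    {
        block_t * cur = (block_t *)((uint8_t *)ptr - sizeof(block_t));
        cur->busy = 0;

        /* forward coalescing, as in khm_free() */
        while( 1 )
        {
            block_t * next = (block_t *)((uint8_t *)cur + cur->size);
            if( ((uint8_t *)next >= heap + HEAP_SIZE) || next->busy ) break;
            cur->size += next->size;
        }
    }

    int main( void )
    {
        toy_init();
        void * a = toy_alloc( 100 );        /* 128-byte block at the base     */
        void * b = toy_alloc( 200 );        /* 256-byte block just after it   */

        toy_free( b );                      /* b merges with the free tail    */
        toy_free( a );                      /* a then absorbs (b + tail)      */

        printf( "first block: size=%u busy=%u\n",        /* expect 4096 / 0  */
                ((block_t *)heap)->size, ((block_t *)heap)->busy );
        return 0;
    }

Coalescing is forward-only, in the sketch as in khm_free(), which is why main() frees b before a: the final free of a can then absorb everything that follows it into a single HEAP_SIZE-byte free block.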