source: trunk/kernel/mm/vmm.h @ 560

Last change on this file since 560 was 469, checked in by alain, 6 years ago

1) Introduce the libsemaphore library.
2) Introduce a small libmath library, required by the "fft" application.
3) Introduce the multithreaded "fft" application.
4) Fix a bad synchronisation bug in the Copy-On-Write mechanism.

File size: 24.8 KB
[1]1/*
2 * vmm.h - virtual memory management related operations
3 *
4 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
5 *           Mohamed Lamine Karaoui (2015)
[437]6 *           Alain Greiner (2016,2017,2018)
[18]7 *
[1]8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#ifndef _VMM_H_
27#define _VMM_H_
28
[457]29#include <hal_kernel_types.h>
[1]30#include <bits.h>
31#include <list.h>
32#include <spinlock.h>
33#include <hal_gpt.h>
34#include <vseg.h>
35#include <page.h>
36
37/****  Forward declarations  ****/
38
39struct process_s;
40
41/*********************************************************************************************
[407]42 * This structure defines the STACK allocator used by the VMM to dynamically handle
43 * a STACK vseg requested or released by a user process.
44 * This allocator handles a fixed size array of fixed size slots in the STACK zone.
[1]45 * The stack size and the number of slots are defined by the CONFIG_VMM_STACK_SIZE and
[407]46 * CONFIG_VMM_STACK_BASE parameters.
[1]47 * Each slot can contain one user stack vseg. The first page in each slot is not allocated,
48 * in order to detect stack overflow.
49 * The slot index can be computed from the slot base address, and conversely
50 * (see the illustrative sketch below). All allocation / release operations are registered
[18]51 * in the stack_bitmap, which completely defines the STACK zone state.
[1]52 ********************************************************************************************/
53
54typedef struct stack_mgr_s
55{
56    spinlock_t     lock;               /*! lock protecting STACK allocator                  */
57    vpn_t          vpn_base;           /*! first page of STACK zone                         */
58    bitmap_t       bitmap;             /*! bit vector of allocated stacks                   */
59}
60stack_mgr_t;
61
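The slot arithmetic and the bitmap bookkeeping described above can be summarized by the following standalone sketch. It is only an illustration, not the ALMOS-MKH code: the CONFIG_VMM_STACK_* values, the number of slots, and the helper names are hypothetical placeholders.

/* Illustrative sketch of the STACK allocator arithmetic (hypothetical values). */
#include <stdint.h>
#include <stdbool.h>

#define CONFIG_VMM_STACK_BASE  0x00100000   /* assumed first VPN of the STACK zone   */
#define CONFIG_VMM_STACK_SIZE  256          /* assumed number of pages per slot      */
#define STACK_SLOTS_NR         32           /* assumed number of slots in the zone   */

static uint32_t stack_bitmap;               /* bit i set <=> slot i is allocated     */

/* slot index -> first VPN of the slot (the first page stays unmapped: overflow guard) */
static uint32_t slot_to_vpn( uint32_t index )
{
    return CONFIG_VMM_STACK_BASE + (index * CONFIG_VMM_STACK_SIZE);
}

/* first VPN of a slot -> slot index (the reverse computation) */
static uint32_t vpn_to_slot( uint32_t vpn_base )
{
    return (vpn_base - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE;
}

/* allocate the first free slot, or return false when the zone is full */
static bool stack_slot_alloc( uint32_t * vpn_base )
{
    for( uint32_t i = 0 ; i < STACK_SLOTS_NR ; i++ )
    {
        if( (stack_bitmap & (1U << i)) == 0 )
        {
            stack_bitmap |= (1U << i);
            *vpn_base = slot_to_vpn( i );
            return true;
        }
    }
    return false;
}

/* release a slot identified by its base VPN */
static void stack_slot_release( uint32_t vpn_base )
{
    stack_bitmap &= ~(1U << vpn_to_slot( vpn_base ));
}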
62/*********************************************************************************************
[407]63 * This structure defines the MMAP allocator used by the VMM to dynamically handle 
[1]64 * MMAP vsegs requested or released by a user process.
[18]65 * This allocator should only be used in the reference cluster.
66 * - allocation policy : all allocated vsegs occupy an integer number of pages that is
[1]67 *   a power of 2, and are aligned on a page boundary. The requested number of pages is
[18]68 *   rounded up if required. The first_free_vpn variable completely defines the MMAP zone state.
[1]69 *   It is never decremented, as the released vsegs are simply registered in a zombi_list.
[18]70 *   The relevant zombi_list is checked first for each allocation request.
[1]71 * - release policy : a released MMAP vseg is registered in an array of zombi_lists.
72 *   This array is indexed by log2(number of pages), and each entry contains the root of
73 *   a local list of zombi vsegs that have the same size. The physical memory allocated
74 *   for a zombi vseg descriptor is not released, so that its "list" field can be reused.
75 *   The physical memory allocated for MMAP vseg descriptors is actually released only
76 *   when the VMM is destroyed (see the illustrative sketch below).
77 ********************************************************************************************/
78
79typedef struct mmap_mgr_s
80{
81    spinlock_t     lock;               /*! lock protecting MMAP allocator                   */
82    vpn_t          vpn_base;           /*! first page of MMAP zone                          */
83    vpn_t          vpn_size;           /*! number of pages in MMAP zone                     */
84    vpn_t          first_free_vpn;     /*! first free page in MMAP zone                     */
85    list_entry_t   zombi_list[32];     /*! array of roots of released vsegs lists           */
86}
87mmap_mgr_t;
88
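The allocation and release policies above (power-of-2 rounding, one zombi_list per size, a first_free_vpn that is never decremented) can be illustrated by the standalone sketch below. The simplified list type and the helper names are hypothetical, not the ALMOS-MKH types.

/* Illustrative sketch of the MMAP allocation / release policy (hypothetical types). */
#include <stdint.h>
#include <stddef.h>

#define MMAP_ZOMBI_LISTS  32

typedef struct zombi_vseg_s            /* simplified stand-in for a released MMAP vseg */
{
    uint32_t              vpn_base;    /* first page of the released vseg              */
    struct zombi_vseg_s * next;        /* plays the role of the "list" field           */
}
zombi_vseg_t;

static zombi_vseg_t * zombi_list[MMAP_ZOMBI_LISTS];  /* one list per power-of-2 size   */
static uint32_t       first_free_vpn;                /* never decremented              */

/* round a requested page count up to the next power of 2, and return its log2 */
static uint32_t mmap_order( uint32_t npages )
{
    uint32_t order = 0;
    while( (1U << order) < npages ) order++;
    return order;
}

/* allocate <npages> pages: reuse a zombi vseg of the same size if one exists,
 * otherwise take new pages right after the last allocated ones                */
static uint32_t mmap_alloc( uint32_t npages )
{
    uint32_t order = mmap_order( npages );

    if( zombi_list[order] != NULL )            /* the relevant zombi_list is checked first */
    {
        zombi_vseg_t * zombi = zombi_list[order];
        zombi_list[order]    = zombi->next;
        return zombi->vpn_base;
    }

    uint32_t vpn_base = first_free_vpn;        /* fresh region at the end of the used zone */
    first_free_vpn   += (1U << order);
    return vpn_base;
}

/* release: the vseg descriptor is simply pushed on the zombi_list of its size */
static void mmap_release( zombi_vseg_t * vseg , uint32_t npages )
{
    uint32_t order    = mmap_order( npages );
    vseg->next        = zombi_list[order];
    zombi_list[order] = vseg;
}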
89/*********************************************************************************************
90 * This structure defines the Virtual Memory Manager for a given process in a given cluster.
[408]91 * This local VMM provides four main services:
92 * 1) It registers all vsegs in the local copy of the vseg list (VSL).
93 * 2) It contains the local copy of the generic page table (GPT).
94 * 3) The stack manager dynamically allocates virtual memory space for the STACK vsegs.
95 * 4) The mmap manager dynamically allocates virtual memory for the (FILE/ANON/REMOTE) vsegs.
96 *********************************************************************************************
97 * Implementation notes:
98 * 1. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by
99 *    a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
100 *    An example is the vmm_fork_copy() function.
[433]101 * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
[408]102 *    structures, stored in the reference cluster.
[1]103 ********************************************************************************************/
104
105typedef struct vmm_s
106{
[408]107        remote_rwlock_t  vsegs_lock;         /*! lock protecting the vsegs list                 */
108        xlist_entry_t    vsegs_root;         /*! VSL root (VSL only complete in reference)      */
109        uint32_t         vsegs_nr;           /*! total number of local vsegs                    */
[1]110
[408]111    gpt_t            gpt;                /*! Generic Page Table (complete in reference)     */
[1]112
[408]113    stack_mgr_t      stack_mgr;          /*! embedded STACK vsegs allocator                 */
114    mmap_mgr_t       mmap_mgr;           /*! embedded MMAP vsegs allocator                  */
[1]115
[408]116        uint32_t         pgfault_nr;         /*! page fault counter (instrumentation)           */
[1]117
[408]118    vpn_t            kent_vpn_base;      /*! kentry vseg first page                         */
119    vpn_t            args_vpn_base;      /*! args vseg first page                           */
120    vpn_t            envs_vpn_base;      /*! envs zone first page                           */
121    vpn_t            heap_vpn_base;      /*! heap zone first page                           */
122        vpn_t            code_vpn_base;      /*! code zone first page                           */
123        vpn_t            data_vpn_base;      /*! data zone first page                           */
[1]124
[408]125        intptr_t         entry_point;        /*! main thread entry point                        */
[1]126}
127vmm_t;
128
129/*********************************************************************************************
[406]130 * This function initializes the virtual memory manager attached to a user process.
[407]131 * - It initializes the STACK and MMAP allocators.
132 * - It registers the "kentry", "args", "envs" vsegs in the VSL.
[409]133 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
134 * - For TSAR, it maps all pages of the "kentry" vseg, which must be identity mapped.
135 * Note:
[407]136 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
137 * - The "stack" vsegs are dynamically created by the thread_user_create() function.
[409]138 * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.
[1]139 *********************************************************************************************
140 * @ process   : pointer on process descriptor
[415]141 * @ return 0 if success / return -1 if failure.
[1]142 ********************************************************************************************/
[415]143error_t vmm_init( struct process_s * process );
[1]144
145/*********************************************************************************************
[407]146 * This function displays on TXT0 the list of registered vsegs for a given <process>.
[429]147 * It must be executed by a thread running in the reference cluster.
148 * If the <mapping> argument is true, it displays for each vseg all mapped PTEs in GPT.
[23]149 *********************************************************************************************
[407]150 * @ process   : pointer on process descriptor.
151 * @ mapping   : detailed mapping if true.
152 ********************************************************************************************/
153void vmm_display( struct process_s * process,
154                  bool_t             mapping );
155
156/*********************************************************************************************
[433]157 * This function is called by the process_make_fork() function. It partially copies
[408]158 * the content of a remote parent process VMM to the local child process VMM:
159 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
160 *   VSL, and all valid GPT entries in the parent GPT are copied to the child GPT.
161 *   The WRITABLE flag is reset and the COW flag is set in the child GPT.
162 * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the
163 *   GPT entries are not copied in the child GPT, which will be dynamically updated from
164 *   the .elf file when a page fault is reported.
165 * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all
166 *   valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
167 * - no STACK vseg is copied from the parent VMM to the child VMM, because the child STACK vseg
[469]168 *   must be copied later from the cluster containing the user thread requesting the fork().
[407]169 *********************************************************************************************
[408]170 * @ child_process     : local pointer on local child process descriptor.
171 * @ parent_process_xp : extended pointer on remote parent process descriptor.
[415]172 * @ return 0 if success / return -1 if failure.
[23]173 ********************************************************************************************/
[408]174error_t vmm_fork_copy( struct process_s * child_process,
175                       xptr_t             parent_process_xp );
[23]176
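The per-type copy policy described above can be summarized by the sketch below, where the MMAP type is read as the ANON type. The enum, the flag constants, and the policy structure are hypothetical placeholders, not the ALMOS-MKH vseg_type_t definitions.

/* Illustrative sketch of the per-vseg-type fork copy policy (hypothetical types). */
#include <stdint.h>
#include <stdbool.h>

typedef enum { VSEG_CODE, VSEG_DATA, VSEG_STACK, VSEG_FILE, VSEG_ANON, VSEG_REMOTE } vseg_kind_t;

#define PTE_WRITABLE  0x1
#define PTE_COW       0x2

typedef struct { bool copy_vseg; bool copy_ptes; uint32_t set; uint32_t clear; } fork_policy_t;

/* returns what is done for one parent vseg of the given kind during a fork copy */
static fork_policy_t fork_policy( vseg_kind_t kind )
{
    switch( kind )
    {
        case VSEG_DATA:                        /* copied, write-protected, marked COW  */
        case VSEG_ANON:
        case VSEG_REMOTE:
            return (fork_policy_t){ true, true, PTE_COW, PTE_WRITABLE };
        case VSEG_CODE:                        /* vseg registered, PTEs re-faulted     */
            return (fork_policy_t){ true, false, 0, 0 };
        case VSEG_FILE:                        /* copied, shared mapping, no COW       */
            return (fork_policy_t){ true, true, 0, 0 };
        case VSEG_STACK:                       /* handled later by the forking thread  */
        default:
            return (fork_policy_t){ false, false, 0, 0 };
    }
}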
177/*********************************************************************************************
[433]178 * This function is called by the process_make_fork() function executing the fork syscall.
[408]179 * It sets the COW flag and resets the WRITABLE flag in all GPT entries of the DATA, MMAP,
180 * and REMOTE vsegs of a process identified by the <process> argument.
181 * It must be called by a thread running in the reference cluster, which contains the complete
[433]182 * VSL and GPT (use the rpc_vmm_set_cow_client() function when the client thread is remote).
[408]183 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
184 * using the list of copies stored in the owner process, and using remote_write accesses to
[433]185 * update the remote GPTs. It atomically increments the pending_fork counter in all involved
186 * physical page descriptors. It cannot fail, as only mapped entries in GPTs are updated.
[1]187 *********************************************************************************************
[408]188 * @ process   : local pointer on local reference process descriptor.
189 ********************************************************************************************/
190void vmm_set_cow( struct process_s * process );
191
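A minimal sketch of what happens to each mapped GPT entry of an eligible vseg is given below: the WRITABLE attribute is cleared, the COW attribute is set, and the pending forks counter of the physical page is atomically incremented. The types, the flag values, and the GCC atomic builtin are assumptions of this sketch, not the ALMOS-MKH primitives.

/* Illustrative sketch of the per-entry COW marking (hypothetical types). */
#include <stdint.h>

#define PTE_MAPPED    0x1
#define PTE_WRITABLE  0x2
#define PTE_COW       0x4

typedef struct { uint32_t attr; uint32_t ppn; } pte_sketch_t;

/* one physical page descriptor, with the counter of forks still sharing it */
typedef struct { uint32_t pending_forks; } page_sketch_t;

static void set_cow_one_entry( pte_sketch_t * pte , page_sketch_t * page )
{
    if( (pte->attr & PTE_MAPPED) == 0 ) return;      /* only mapped entries are updated  */

    pte->attr &= ~PTE_WRITABLE;                      /* further writes will trap ...     */
    pte->attr |=  PTE_COW;                           /* ... and be handled as COW events */

    __atomic_add_fetch( &page->pending_forks, 1, __ATOMIC_SEQ_CST );   /* one more sharer */
}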
192/*********************************************************************************************
[433]193 * This global function modifies a GPT entry identified  by the <process> and <vpn>
194 * arguments in all clusters containing a process copy.
195 * It must be called by a thread running in the reference cluster.
[408]196 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
197 * using the list of copies stored in the owner process, and using remote_write accesses to
198 * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
199 *********************************************************************************************
200 * @ process   : local pointer on local process descriptor.
201 * @ vpn       : PTE index.
202 * @ attr      : PTE / attributes.
203 * @ ppn       : PTE / physical page index.
204 ********************************************************************************************/
[433]205void vmm_global_update_pte( struct process_s * process,
206                            vpn_t              vpn,
207                            uint32_t           attr,
208                            ppn_t              ppn );
[408]209
210/*********************************************************************************************
[433]211 * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
212 * <process> and <vseg> arguments. It can be used for any type of vseg.
213 * If this function is executed in the reference cluster, it handles, for each referenced
214 * physical page, the pending forks counter (see the sketch below):
215 * - if the counter is non-zero, it decrements it.
216 * - if the counter is zero, it releases the physical page to the local kmem allocator.
217 *********************************************************************************************
218 * @ process  : pointer on process descriptor.
219 * @ vseg     : pointer on the vseg to be unmapped.
220 ********************************************************************************************/
221void vmm_unmap_vseg( struct process_s * process,
222                     vseg_t           * vseg );
223
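The decrement-or-release decision can be sketched as follows; the page descriptor type and the kmem_release_page() stub are hypothetical stand-ins for the real kmem interface.

/* Illustrative sketch of the per-page decision in the reference cluster (hypothetical types). */
#include <stdint.h>

typedef struct { uint32_t pending_forks; } page_sketch_t;

static void kmem_release_page( page_sketch_t * page )   /* stand-in for the kmem allocator */
{
    (void)page;
}

static void unmap_one_page( page_sketch_t * page )
{
    if( page->pending_forks )                            /* still shared with a forked child */
    {
        __atomic_sub_fetch( &page->pending_forks, 1, __ATOMIC_SEQ_CST );
    }
    else                                                 /* last user: return the frame      */
    {
        kmem_release_page( page );
    }
}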
224/*********************************************************************************************
225 * This function deletes, in the local cluster, all vsegs registered in the VSL
226 * of the process identified by the <process> argument. For each vseg:
227 * - it unmaps all vseg PTEs from the GPT (releasing the physical pages when required).
228 * - it removes the vseg from the local VSL.
229 * - it releases the memory allocated to the local vseg descriptors.
[409]230 * Finally, it releases the memory allocated to the GPT itself.
[408]231 *********************************************************************************************
[23]232 * @ process   : pointer on process descriptor.
[1]233 ********************************************************************************************/
234void vmm_destroy( struct process_s * process );
235
236/*********************************************************************************************
[18]237 * This function scans the list of vsegs registered in the VMM of a given process descriptor
238 * to check if a given virtual region (defined by a base and size) overlaps an existing vseg.
239 *********************************************************************************************
240 * @ process  : pointer on process descriptor.
241 * @ base     : region virtual base address.
242 * @ size     : region size (bytes).
243 * @ returns NULL if no conflict / return conflicting vseg pointer if conflict.
244 ********************************************************************************************/
245vseg_t * vmm_check_conflict( struct process_s * process,
246                             vpn_t              base,
247                             vpn_t              size );
248
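The conflict test itself is the standard half-open interval overlap check, sketched below with hypothetical plain integer arguments.

/* Illustrative sketch of the region / vseg overlap test (hypothetical types). */
#include <stdint.h>
#include <stdbool.h>

/* two half-open intervals [base, base+size) overlap iff each one starts before the other ends */
static bool regions_overlap( uint32_t base_a , uint32_t size_a ,
                             uint32_t base_b , uint32_t size_b )
{
    return (base_a < base_b + size_b) && (base_b < base_a + size_a);
}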
249/*********************************************************************************************
[18]250 * This function allocates memory for a vseg descriptor, initializes it, and registers it
[407]251 * in the VMM of the local process descriptor, which should be the reference process.
252 * For the "stack", "file", "anon", and "remote" types, it does not use the <base> argument,
253 * but uses the STACK and MMAP virtual memory allocators.
254 * It checks collision with all pre-existing vsegs.
255 * To comply with the "on-demand" paging policy, this function does NOT modify the page table,
256 * and does not allocate physical memory for vseg data.
257 * It should be called by a local thread (could be an RPC thread if the client thread is not
258 * running in the reference cluster).
[1]259 *********************************************************************************************
[407]260 * @ process     : pointer on local process descriptor.
261 * @ type        : vseg type.
262 * @ base        : vseg base address (not used for dynamically allocated vsegs).
263 * @ size        : vseg size (bytes).
264 * @ file_offset : offset in file for CODE, DATA, FILE types.
265 * @ file_size   : can be smaller than "size" for DATA type.
266 * @ mapper_xp   : extended pointer on mapper for CODE, DATA, FILE types.
267 * @ cxy         : physical mapping cluster (for non distributed vsegs).
268 * @ returns pointer on vseg if success / returns NULL if no memory, or conflict.
[1]269 ********************************************************************************************/
270vseg_t * vmm_create_vseg( struct process_s * process,
[407]271                          vseg_type_t        type,
[18]272                          intptr_t           base,
[407]273                              uint32_t           size,
274                          uint32_t           file_offset,
275                          uint32_t           file_size,
276                          xptr_t             mapper_xp,
277                          cxy_t              cxy );
[1]278
279/*********************************************************************************************
[18]280 * This function removes a vseg identified by its pointer from the VMM of the calling process.
[1]281 * - If the vseg has neither the STACK nor the MMAP type, it is removed from the vsegs list,
282 *   and the physical memory allocated to vseg descriptor is released to KMEM.
283 * - If the vseg has the STACK type, it is removed from the vsegs list, the physical memory
284 *   allocated to vseg descriptor is released to KMEM, and the stack slot is returned to the
285 *   VMM STACK allocator.
286 * - If the vseg has the MMAP type, it is removed from the vsegs list and is registered
287 *   in the zombi_list of the VMM MMAP allocator for future reuse. The physical memory
288 *   allocated to vseg descriptor is NOT released to KMEM.
289 *********************************************************************************************
290 * @ vseg      : pointer on vseg to be removed.
291 ********************************************************************************************/
292void vmm_remove_vseg( vseg_t * vseg );
293
294/*********************************************************************************************
[18]295 * This function removes a given region (defined by a base address and a size) from
[407]296 * the VMM of a given process descriptor. This can modify the number of vsegs:
[1]297 * (a) if the region is not entirely mapped in an existing vseg, it's an error.
298 * (b) if the region has same base and size as an existing vseg, the vseg is removed.
[406]299 * (c) if the removed region cuts the vseg in two parts, the vseg is modified.
300 * (d) if the removed region cuts the vseg in three parts, it is modified, and a new
301 *     vseg is created with the same type (see the classification sketch below).
[407]302 * FIXME [AG] this function must be called by a thread running in the reference cluster,
303 * and the VMM must be updated in all process descriptors copies.
[1]304 *********************************************************************************************
305 * @ process   : pointer on process descriptor
306 * @ base      : vseg base address
307 * @ size      : vseg size (bytes)
308 ********************************************************************************************/
309error_t vmm_resize_vseg( struct process_s * process,
310                         intptr_t           base,
311                         intptr_t           size );
312
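The four cases (a) to (d) can be classified by the following sketch; the enum and the argument names are hypothetical, and the actual function also has to update the VSL and the GPT accordingly.

/* Illustrative sketch of the vmm_resize_vseg() case analysis (hypothetical types). */
#include <stdint.h>

typedef enum { RESIZE_ERROR, RESIZE_REMOVE, RESIZE_TRUNCATE, RESIZE_SPLIT } resize_case_t;

/* classify the removal of [base, base+size) against a vseg [vbase, vbase+vsize) */
static resize_case_t resize_case( uint32_t vbase , uint32_t vsize ,
                                  uint32_t base  , uint32_t size )
{
    uint32_t vend = vbase + vsize;
    uint32_t end  = base  + size;

    if( (base < vbase) || (end > vend) )   return RESIZE_ERROR;    /* (a) not fully mapped   */
    if( (base == vbase) && (end == vend) ) return RESIZE_REMOVE;   /* (b) exact match        */
    if( (base == vbase) || (end == vend) ) return RESIZE_TRUNCATE; /* (c) cut in two parts   */
    return RESIZE_SPLIT;                                           /* (d) cut in three parts */
}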
313/*********************************************************************************************
[388]314 * This function checks that a given virtual address is contained in a registered vseg.
[399]315 * It can be called by any thread running in any cluster:
316 * - if the vseg is registered in the local process VMM, it returns the local vseg pointer.
[388]317 * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster,
318 *   register it in local VMM and returns the local vseg pointer, if success.
[406]319 * - it returns an user error if the vseg is missing in the reference VMM, or if there is
320 *   not enough memory for a new vseg descriptor in cluster containing the calling thread.
[1]321 *********************************************************************************************
[388]322 * @ process   : [in] pointer on process descriptor
323 * @ vaddr     : [in] virtual address
[440]324 * @ vseg      : [out] local pointer on local vseg
325 * @ returns 0 if success / returns -1 if user error (out of segment).
[1]326 *********************************************************************************************/
[388]327error_t vmm_get_vseg( struct process_s  * process,
328                      intptr_t            vaddr,
[394]329                      vseg_t           ** vseg );           
[1]330
331/*********************************************************************************************
[440]332 * This function is called by the generic exception handler in case of page-fault,
333 * or copy-on-write event locally detected for a given <vpn> in a given <process>
334 * as defined by the <is_cow> argument.
335 * 1) For a Page-Fault:
336 * - If the local cluster is the reference, or for the STACK and CODE segment types,
337 *   it directly calls the vmm_get_pte() function to access the local VMM.
338 * - Otherwise, it sends an RPC_VMM_GET_PTE to the reference cluster to get the missing
339 *   PTE attributes and PPN.
340 * This function checks that the missing VPN belongs to a registered vseg, allocates
341 * a new physical page if required, and updates the local page table.
342 * 2) For a Copy-On-Write:
343 * - If there is no pending fork, it resets the COW flag and sets the WRITE flag in the reference
344 *   GPT entry, and in all the GPT copies.
345 * - If there is a pending fork, it allocates a new physical page from the cluster defined
346 *   by the vseg type, copies the old physical page content to the new physical page,
347 *   and decrements the pending_fork counter in the old physical page descriptor.
[1]348 *********************************************************************************************
[440]349 * @ process   : pointer on local process descriptor copy.
350 * @ vpn       : VPN of the missing or faulting PTE.
351 * @ is_cow    : Copy-On-Write event if true / Page-fault if false.
352 * @ returns 0 if success / returns ENOMEM if no memory or illegal VPN.
[1]353 ********************************************************************************************/
354error_t vmm_handle_page_fault( struct process_s * process,
[440]355                               vpn_t              vpn,
356                               bool_t             is_cow );
[1]357
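The local-versus-remote dispatch described in case 1) can be sketched as a simple predicate; the enum is a hypothetical stand-in for the vseg types.

/* Illustrative sketch of the page-fault dispatch decision (hypothetical types). */
#include <stdbool.h>

typedef enum { PF_CODE, PF_DATA, PF_STACK, PF_FILE, PF_ANON, PF_REMOTE } pf_vseg_kind_t;

/* true when the fault can be handled with the local VMM only,
 * false when an RPC to the reference cluster is required        */
static bool handle_locally( bool local_is_reference , pf_vseg_kind_t kind )
{
    return local_is_reference || (kind == PF_STACK) || (kind == PF_CODE);
}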
358/*********************************************************************************************
[440]359 * This function is called by the vmm_handle_page_fault() function to handle both the
360 * "page-fault" and the "copy-on-write" events for a given <vpn> in a given <process>,
361 * as defined by the <is_cow> argument.
362 * The vseg containing the searched VPN must be registered in the reference VMM.
[433]363 * - for a page-fault, it allocates the missing physical page from the target cluster
364 *   defined by the vseg type, initializes it, and updates the reference GPT, but not
365 *   the GPT copies, which will be updated on demand.
[407]366 * - for a copy-on-write, it allocates a new physical page from the target cluster,
[433]367 *   initializes it from the old physical page, and updates the reference GPT and all
368 *   the GPT copies, for coherence.
[440]369 * It calls the RPC_PMEM_GET_PAGES to get the new physical page when the target cluster
370 * is not the local cluster.
[407]371 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
372 *********************************************************************************************
[1]373 * @ process   : [in] pointer on process descriptor.
374 * @ vpn       : [in] VPN defining the missing PTE.
[440]375 * @ is_cow    : [in] "copy_on_write" if true / "page_fault" if false.
[1]376 * @ attr      : [out] PTE attributes.
377 * @ ppn       : [out] PTE ppn.
378 * @ returns 0 if success / returns ENOMEM if error.
379 ********************************************************************************************/
380error_t vmm_get_pte( struct process_s * process,
381                     vpn_t              vpn,
[440]382                     bool_t             is_cow,
[1]383                     uint32_t         * attr,
384                     ppn_t            * ppn );
385
386/*********************************************************************************************
[401]387 * This function is called by the vmm_get_pte() function when a page is unmapped.
[313]388 * Depending on the vseg type, defined by the <vseg> argument, it returns the PPN
389 * (Physical Page Number) associated to a missing page defined by the <vpn> argument.
[406]390 * - For the FILE type, it returns directly the physical page from the file mapper.
[433]391 * - For the CODE and DATA types, it allocates a new physical page from the cluster defined
[406]392 *   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
393 *   and initializes this page from the .elf file mapper.
394 * - For all other types, it allocates a new physical page from the cluster defined
395 *   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
396 *   but the new page is not initialized.
[313]397 *********************************************************************************************
398 * @ vseg   : local pointer on vseg containing the missing page.
399 * @ vpn    : Virtual Page Number identifying the missing page.
400 * @ ppn    : [out] returned Physical Page Number.
[401]401 * return 0 if success / return EINVAL or ENOMEM if error.
[313]402 ********************************************************************************************/
403error_t vmm_get_one_ppn( vseg_t * vseg,
404                         vpn_t    vpn,
405                         ppn_t  * ppn );
406
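The target cluster selection mentioned above can be sketched as follows; the vpn_width and cxy_width parameters are hypothetical, and the real code derives the cluster identifier from the hardware configuration.

/* Illustrative sketch of the target cluster selection (hypothetical bit widths). */
#include <stdint.h>
#include <stdbool.h>

/* for a distributed vseg the target cluster comes from the VPN MSB bits,
 * otherwise it is the fixed cluster recorded in the vseg descriptor       */
static uint32_t target_cluster( bool     distributed ,
                                uint32_t vseg_cxy ,     /* <vseg->cxy> equivalent          */
                                uint32_t vpn ,
                                uint32_t vpn_width ,    /* assumed number of bits in a VPN */
                                uint32_t cxy_width )    /* assumed number of bits in a CXY */
{
    if( distributed == false ) return vseg_cxy;
    return (vpn >> (vpn_width - cxy_width)) & ((1U << cxy_width) - 1);
}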
[1]407
408#endif /* _VMM_H_ */