Changeset 567 for trunk/kernel/mm/ppm.h


Ignore:
Timestamp:
Oct 5, 2018, 12:01:52 AM (6 years ago)
Author:
alain
Message:

Complete restructuration of kernel locks.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/ppm.h

    r486 r567  
    11/*
    2  * ppm.h - Per-cluster Physical Pages Manager Interface
     2 * ppm.h - Per-cluster Physical Pages Manager definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     5 *          Alain Greiner    (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    2828#include <hal_kernel_types.h>
    2929#include <list.h>
    30 #include <spinlock.h>
     30#include <busylock.h>
     31#include <queuelock.h>
    3132#include <boot_info.h>
    3233#include <page.h>
     
    3637 * This structure defines the Physical Pages Manager in a cluster.
    3738 * In each cluster, the physical memory bank starts at local physical address 0 and
    38  * contains an integer number of pages, is defined by the <pages_nr> field in the
     39 * contains an integer number of pages, defined by the <pages_nr> field in the
    3940 * boot_info structure. It is split in three parts:
    4041 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
     
    4344 * - the "pages_tbl" section contains the physical page descriptors array. It starts
    4445 *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
    45  *   It is created and initialized by the hal_ppm_create() function. "the
     46 *   It is created and initialized by the hal_ppm_create() function.
    4647 * - The "kernel_heap" section contains all physical pages that are not in the
    47  *   in the kernel_code and pages_tbl sections, and that have not been reserved by the
     48 *   kernel_code and pages_tbl sections, and that have not been reserved by the
    4849 *   architecture specific bootloader. The reserved pages are defined in the boot_info
    4950 *   structure.
    5051 *
    5152 * The main service provided by the PMM is the dynamic allocation of physical pages
    52  * from the "kernel_heap" section.
    53  * This low-level allocator implements the buddy algorithm: an allocated block is
    54  * an integer number n of 4 small pages, and n (called order) is a power of 2.
     53 * from the "kernel_heap" section. This low-level allocator implements the buddy
     54 * algorithm: an allocated block is an integer number n of small pages, where n
     55 * is a power of 2, and ln(n) is called order.
     56 * This allocator being shared by the local threads, the free_page lists rooted
     57 * in the PPM descriptor are protected by a local busylock, because it is used
     58 * by the idle_thread during kernel_init().
     59 *
     60 * Another service is to register the dirty pages in a specific dirty_list, that is
     61 * also rooted in the PPM, in order to be able to save all dirty pages on disk.
     62 * This dirty list is protected by a specific local queuelock.
    5563 ****************************************************************************************/
    5664
    5765typedef struct ppm_s
    5866{
    59         spinlock_t     free_lock;               /*! lock protecting free_pages[] lists      */
     67        busylock_t     free_lock;               /*! lock protecting free_pages[] lists      */
    6068        list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists      */
    6169        uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages    */
    6270        page_t       * pages_tbl;               /*! pointer on page descriptors array       */
    6371        uint32_t       pages_nr;                /*! total number of small physical pages    */
    64     spinlock_t     dirty_lock;              /*! lock protecting the dirty pages list    */
     72    queuelock_t    dirty_lock;              /*! lock protecting dirty pages list        */
    6573    list_entry_t   dirty_root;              /*! root of dirty pages list                */
    6674    void         * vaddr_base;              /*! pointer on local physical memory base   */
     
    6876ppm_t;
    6977
     78/************** functions to allocate / release physical pages  *************************/
     79
    7080/*****************************************************************************************
    7181 * This is the low-level physical pages allocation function.
     
    107117
    108118
     119/************** functions to translate [ page <->  base <-> ppn ] ***********************/
    109120
    110121/*****************************************************************************************
     
    175186error_t ppm_assert_order( ppm_t * ppm );
    176187
     188
     189/*********** functions to handle dirty pages  *******************************************/
     190
     191/*****************************************************************************************
     192 * This function registers a physical page as dirty.
     193 * - it takes the queuelock protecting the PPM dirty_list.
     194 * - it tests the PG_DIRTY flag in the page descriptor.
     195 *   . if page already dirty => do nothing
     196 *   . if page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
     197 * - it releases the queuelock protecting the PPM dirty_list.
     198 *****************************************************************************************
     199 * @ page     : pointer on page descriptor.
     200 * @ returns true if page was not dirty / returns false if page was dirty
     201 ****************************************************************************************/
     202bool_t ppm_page_do_dirty( page_t * page );
     203
     204/*****************************************************************************************
     205 * This function unregisters a physical page as dirty.
     206 * - it takes the queuelock protecting the PPM dirty_list.
     207 * - it tests the PG_DIRTY flag in the page descriptor.
     208 *   . if page not dirty => do nothing
     209 *   . if page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
     210 * - it releases the queuelock protecting the PPM dirty_list.
     211 *****************************************************************************************
     212 * @ page     : pointer on page descriptor.
     213 * @ returns true if page was dirty / returns false if page was not dirty
     214 ****************************************************************************************/
     215bool_t ppm_page_undo_dirty( page_t * page );
     216
     217/*****************************************************************************************
     218 * This function synchronizes (i.e. update the disk) all dirty pages in a cluster.
     219 * - it takes the queuelock protecting the PPM dirty_list.
     220 * - it scans the PPM dirty list, and for each page:
     221 *   . it takes the lock protecting the page.
     222 *   . it removes the page from the PPM dirty_list.
     223 *   . it resets the PG_DIRTY flag.
     224 *   . it releases the lock protecting the page.
     225 * - it releases the queuelock protecting the PPM dirty_list.
     226 * The PPM dirty_list is empty when the sync operation completes.
     227 ****************************************************************************************/
     228void ppm_sync_all_pages( void );
     229
    177230#endif  /* _PPM_H_ */
Note: See TracChangeset for help on using the changeset viewer.