Changeset 567


Timestamp:  Oct 5, 2018, 12:01:52 AM
Author:     alain
Message:    Complete restructuring of kernel locks.
Location:   trunk/kernel/mm
Files:      15 edited

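All fifteen files follow the same migration from the old spinlock / rwlock primitives to the new busylock, queuelock, remote_busylock and remote_rwlock families. As an orientation aid, here is an illustrative summary of the renaming pattern, reconstructed from the hunks below (not an exhaustive table):

    spinlock_init( &lock )                     ->  busylock_init( &lock , LOCK_xxx_STATE )
    spinlock_lock( &lock )                     ->  busylock_acquire( &lock )
    spinlock_lock_busy( &lock , &irq_state )   ->  busylock_acquire( &lock )
    spinlock_unlock( &lock )                   ->  busylock_release( &lock )
    spinlock_unlock_busy( &lock , irq_state )  ->  busylock_release( &lock )
    rwlock_rd_lock / rwlock_rd_unlock          ->  rwlock_rd_acquire / rwlock_rd_release
    rwlock_wr_lock / rwlock_wr_unlock          ->  rwlock_wr_acquire / rwlock_wr_release
    remote_rwlock_rd_lock / _rd_unlock         ->  remote_rwlock_rd_acquire / _rd_release
    remote_spinlock_lock / _unlock             ->  remote_busylock_acquire / _release

The explicit irq_state save/restore arguments disappear from the callers; presumably the busylock implementation now handles interrupt masking internally (not shown in this changeset). The PPM dirty list, previously protected by a spinlock, now uses a queuelock (queuelock_acquire / queuelock_release).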
  • trunk/kernel/mm/kcm.c

    r551 r567  
    11/*
    2  * kcm.c - Per cluster & per type Kernel Cache Manager access functions
     2 * kcm.c - Per cluster Kernel Cache Manager implementation.
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    2626#include <hal_kernel_types.h>
    2727#include <hal_special.h>
     28#include <busylock.h>
    2829#include <list.h>
    2930#include <printk.h>
     
    3637#include <kcm.h>
    3738
     39
    3840//////////////////////////////////////////////////////////////////////////////////////
    3941// This static function returns pointer on an allocated block from an active page.
     
    219221
    220222        // initialize lock
    221         spinlock_init( &kcm->lock );
     223        busylock_init( &kcm->lock , LOCK_KCM_STATE );
    222224
    223225        // initialize KCM type
     
    248250
    249251        // get KCM lock
    250         spinlock_lock( &kcm->lock );
     252        busylock_acquire( &kcm->lock );
    251253
    252254        // release all free pages
     
    278280
    279281        // release KCM lock
    280         spinlock_unlock( &kcm->lock );
     282        busylock_release( &kcm->lock );
    281283}
    282284
     
    288290
    289291        // get lock
    290         uint32_t     irq_state;
    291         spinlock_lock_busy( &kcm->lock, &irq_state );
     292        busylock_acquire( &kcm->lock );
    292293
    293294        // get an active page
     
    299300                if( kcm_page == NULL )
    300301                {
    301                         spinlock_unlock_busy( &kcm->lock, irq_state );
     302                        busylock_release( &kcm->lock );
    302303                        return NULL;
    303304                }
     
    319320
    320321        // release lock
    321         spinlock_unlock_busy( &kcm->lock, irq_state );
     322        busylock_release( &kcm->lock );
    322323
    323324        return ptr;
     
    336337
    337338        // get lock
    338         spinlock_lock( &kcm->lock );
     339        busylock_acquire( &kcm->lock );
    339340
    340341        // release block
     
    342343
    343344        // release lock
    344         spinlock_unlock( &kcm->lock );
     345        busylock_release( &kcm->lock );
    345346}
    346347
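After this change every KCM access function brackets its critical section with the same acquire/release pair, and the lock is created with a type identifier. A condensed sketch of the resulting pattern (function bodies elided, names taken from the hunks above):

    // in kcm_init()
    busylock_init( &kcm->lock , LOCK_KCM_STATE );

    // in kcm_alloc() / kcm_free() / kcm_destroy()
    busylock_acquire( &kcm->lock );
    // ... allocate or release blocks (elided) ...
    busylock_release( &kcm->lock );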
  • trunk/kernel/mm/kcm.h

    r457 r567  
    11/*
    2  * kcm.h - Per-cluster Kernel Cache Manager Interface
     2 * kcm.h - Per-cluster Kernel Cache Manager definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
     
    2828#include <list.h>
    2929#include <hal_kernel_types.h>
    30 #include <spinlock.h>
     30#include <busylock.h>
    3131#include <page.h>
    3232#include <bits.h>
     
    4646typedef struct kcm_s
    4747{
    48         spinlock_t           lock;             /*! protect exclusive access to allocator   */
     48        busylock_t           lock;             /*! protect KCM allocator                   */
    4949        uint32_t             block_size;       /*! rounded block size (bytes)              */
    5050        uint32_t             blocks_nr;        /*! max number of blocks per page           */
  • trunk/kernel/mm/khm.c

    r551 r567  
    11/*
    2  * khm.c - kernel heap manager implementation.
     2 * khm.c - Kernel Heap Manager implementation.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner (2016)
     5 *          Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    2626#include <hal_kernel_types.h>
    2727#include <hal_special.h>
    28 #include <spinlock.h>
     28#include <busylock.h>
    2929#include <bits.h>
    3030#include <printk.h>
     
    4444
    4545        // initialize lock
    46         spinlock_init( &khm->lock );
     46        busylock_init( &khm->lock , LOCK_KHM_STATE );
    4747
    4848        // compute kernel heap size
     
    7878
    7979        // get lock protecting heap
    80         uint32_t       irq_state;
    81         spinlock_lock_busy( &khm->lock, &irq_state );
     80        busylock_acquire( &khm->lock );
    8281
    8382        // define a starting block to scan existing blocks
     
    9392                if( (intptr_t)current >= (khm->base + khm->size) )  // heap full
    9493                {
    95                         spinlock_unlock_busy(&khm->lock, irq_state );
     94                        busylock_release(&khm->lock);
    9695
    9796                        printk("\n[ERROR] in %s : failed to allocate block of size %d\n",
     
    123122
    124123        // release lock protecting heap
    125         spinlock_unlock_busy( &khm->lock, irq_state );
     124        busylock_release( &khm->lock );
    126125
    127126        return (char*)current + sizeof(khm_block_t);
     
    141140
    142141        // get lock protecting heap
    143         spinlock_lock(&khm->lock);
     142        busylock_acquire(&khm->lock);
    144143
    145144        assert( (current->busy == 1) , "page already freed" );
     
    159158
    160159        // release lock protecting heap
    161         spinlock_unlock( &khm->lock );
     160        busylock_release( &khm->lock );
    162161}
    163162
  • trunk/kernel/mm/khm.h

    r457 r567  
    11/*
    2  * khm.h - kernel heap manager used for variable size memory allocation.
     2 * khm.h - Kernel Heap Manager definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016)
     5 *          Alain Greiner (2016,2017,2018)
    76 *
    87 * Copyright (c) UPMC Sorbonne Universites
     
    2928#include <kernel_config.h>
    3029#include <hal_kernel_types.h>
    31 #include <spinlock.h>
     30#include <busylock.h>
    3231
    3332/*******************************************************************************************
    3433 * This structure defines a Kernel Heap Manager (KHM) in a given cluster.
    35  * It is used to allocate memory objects, that are not
    36  * enough replicated to justify a dedicated KCM allocator.
     34 * It is used to allocate memory objects, that are not enough replicated to justify
     35 * a dedicated KCM allocator.
    3736 ******************************************************************************************/
    3837
    3938typedef struct khm_s
    4039{
    41         spinlock_t lock;           /*! lock protecting exclusive access to heap               */
     40        busylock_t lock;           /*! lock protecting KHM allocator                          */
    4241        intptr_t   base;           /*! heap base address                                      */
    4342        uint32_t   size;           /*! heap size (bytes)                                      */
     
    5453typedef struct khm_block_s
    5554{
    56         uint32_t   busy:1;         /*! free block if zero                                     */
    57         uint32_t   size:31;        /*! size coded on 31 bits                                  */
     55        uint32_t   busy;           /*! free block if zero                                     */
     56        uint32_t   size;           /*! block size                                             */
    5857}
    5958khm_block_t;
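In the new khm_block_t, busy and size become two full 32-bit words instead of a 1/31 bitfield pair. Each allocated chunk is still preceded by such a header, and the allocator returns the address just past it, as shown by "return (char*)current + sizeof(khm_block_t);" in the khm.c hunk above. A hypothetical helper (illustrative only, not part of the changeset) makes the layout explicit:

    // recover the block header from a pointer returned by the kernel heap allocator
    static inline khm_block_t * khm_block_from_ptr( void * ptr )
    {
        return (khm_block_t *)( (char *)ptr - sizeof(khm_block_t) );
    }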
  • trunk/kernel/mm/kmem.c

    r551 r567  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016)
     5 *          Alain Greiner (2016,2017,2018)
    76 *
    87 * Copyright (c) UPMC Sorbonne Universites
     
    2827#include <hal_special.h>
    2928#include <printk.h>
    30 #include <spinlock.h>
    31 #include <readlock.h>
     29#include <busylock.h>
    3230#include <memcpy.h>
    3331#include <khm.h>
     
    5149#include <kmem.h>
    5250
    53 ///////////////////////////
     51/////////////////////////////////
    5452void kmem_print_kcm_table( void )
    5553{
     
    168166        kcm_init( kcm , type );
    169167
    170         // register it if the KCM pointers Table
     168        // register it in the KCM pointers Table
    171169        cluster->kcm_tbl[type] = kcm;
    172170
     
    258256                if( cluster->kcm_tbl[type] == NULL )
    259257                {
    260                         spinlock_lock_busy( &cluster->kcm_lock, &irq_state );
     258            // get lock protecting local kcm_tbl[] array
     259                        busylock_acquire( &cluster->kcm_lock );
     260
     261            // create missing KCM
    261262                        error_t error = kmem_create_kcm( type );
    262                         spinlock_unlock_busy( &cluster->kcm_lock, irq_state );
    263                         if ( error ) return NULL;
     263
     264            // release lock protecting local kcm_tbl[] array
     265                        busylock_release( &cluster->kcm_lock );
     266
     267                        if ( error )
     268            {
     269                 printk("\n[ERROR] in %s : cannot create KCM type %d in cluster %x\n",
     270                 __FUNCTION__, type, local_cxy );
     271                 return NULL;
     272            }
    264273                }
    265274
     
    269278                {
    270279                        printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n",
    271                             __FUNCTION__ , type , size , local_cxy );
     280                    __FUNCTION__ , type , size , local_cxy );
    272281                        return NULL;
    273282                }
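The kmem.c hunk also restructures the lazy creation of a per-type KCM allocator: the kcm_tbl[] entry is tested, and only when it is missing is the cluster kcm_lock taken around kmem_create_kcm(), the error being reported after the lock has been released. Condensed sketch of the new control flow, mirroring the lines above:

    if( cluster->kcm_tbl[type] == NULL )
    {
        // get lock protecting local kcm_tbl[] array
        busylock_acquire( &cluster->kcm_lock );

        // create missing KCM
        error_t error = kmem_create_kcm( type );

        // release lock protecting local kcm_tbl[] array
        busylock_release( &cluster->kcm_lock );

        // report failure (the real code prints an error message first)
        if( error ) return NULL;
    }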
  • trunk/kernel/mm/kmem.h

    r486 r567  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Mohamed Lamine Karaoui (2015)
    6  *          Alain Greiner (2016)
     5 *          Alain Greiner (2016,2017,2018)
    76 *
    87 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/mapper.c

    r457 r567  
    3636#include <kmem.h>
    3737#include <kcm.h>
     38#include <ppm.h>
    3839#include <page.h>
    3940#include <cluster.h>
    4041#include <vfs.h>
    4142#include <mapper.h>
     43
    4244
    4345//////////////////////////////////////////////
     
    8385
    8486    // initialize mapper lock
    85     rwlock_init(  &mapper->lock );
     87    rwlock_init(  &mapper->lock , LOCK_MAPPER_STATE );
    8688
    8789    // initialize waiting threads xlist (empty)
     
    153155
    154156    // take mapper lock in READ_MODE
    155     rwlock_rd_lock( &mapper->lock );
     157    rwlock_rd_acquire( &mapper->lock );
    156158
    157159    // search page in radix tree
     
    163165
    164166        // release the lock in READ_MODE and take it in WRITE_MODE
    165         rwlock_rd_unlock( &mapper->lock );
    166         rwlock_wr_lock( &mapper->lock );
     167        rwlock_rd_release( &mapper->lock );
     168        rwlock_wr_acquire( &mapper->lock );
    167169
    168170        // second test on missing page because the page status can have been modified
     
    189191                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
    190192                       __FUNCTION__ , this->trdid , local_cxy );
    191                 rwlock_wr_unlock( &mapper->lock );
     193                rwlock_wr_release( &mapper->lock );
    192194                return NULL;
    193195            }
     
    204206
    205207            // release mapper lock from WRITE_MODE
    206             rwlock_wr_unlock( &mapper->lock );
     208            rwlock_wr_release( &mapper->lock );
    207209
    208210            if( error )
     
    239241        {
    240242            // release mapper lock from WRITE_MODE
    241             rwlock_wr_unlock( &mapper->lock );
     243            rwlock_wr_release( &mapper->lock );
    242244
    243245            // wait load completion
    244             while( 1 )
     246            while( page_is_flag( page , PG_INLOAD ) == false )
    245247            {
    246                 // exit waiting loop when loaded
    247                 if( page_is_flag( page , PG_INLOAD ) == false ) break;
    248 
    249                 // deschedule
     248                // deschedule without blocking
    250249                sched_yield("waiting page loading");
    251250            }
     
    254253    else                          // page available in mapper
    255254    {
    256         rwlock_rd_unlock( &mapper->lock );
     255        rwlock_rd_release( &mapper->lock );
    257256    }
    258257
     
    284283
    285284    // take mapper lock in WRITE_MODE
    286     rwlock_wr_lock( &mapper->lock );
     285    rwlock_wr_acquire( &mapper->lock );
    287286
    288287    // remove physical page from radix tree
     
    290289
    291290    // release mapper lock from WRITE_MODE
    292     rwlock_wr_unlock( &mapper->lock );
     291    rwlock_wr_release( &mapper->lock );
    293292
    294293    // release page to PPM
     
    372371        else
    373372        {
    374             page_do_dirty( page );
     373            ppm_page_do_dirty( page );
    375374            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
    376375        }
     
    485484            dst_ptr = base_ptr + page_offset;
    486485
    487             page_do_dirty( page );
     486            ppm_page_do_dirty( page );
    488487        }
    489488
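Besides the renamed rwlock primitives, the page lookup path in mapper.c keeps its read-to-write upgrade pattern: the mapper lock is released in read mode, re-acquired in write mode, and the "missing page" test is repeated because another thread may have inserted the page while no lock was held. Condensed sketch with the new API (radix-tree and load details elided):

    // take mapper lock in READ_MODE and search the page in the radix tree (elided)
    rwlock_rd_acquire( &mapper->lock );

    if( page == NULL )           // page missing in mapper
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_release( &mapper->lock );
        rwlock_wr_acquire( &mapper->lock );

        // second test on missing page, because the page status
        // can have been modified while no lock was held
        // ... allocate, insert and load the page if still missing (elided) ...

        // release mapper lock from WRITE_MODE
        rwlock_wr_release( &mapper->lock );
    }
    else                         // page available in mapper
    {
        rwlock_rd_release( &mapper->lock );
    }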
  • trunk/kernel/mm/page.c

    r486 r567  
    2727#include <hal_atomic.h>
    2828#include <list.h>
    29 #include <xlist.h>
     29#include <queuelock.h>
    3030#include <memcpy.h>
    31 #include <thread.h>
    32 #include <scheduler.h>
    33 #include <cluster.h>
    34 #include <ppm.h>
    35 #include <mapper.h>
    3631#include <printk.h>
    3732#include <vfs.h>
    3833#include <process.h>
    3934#include <page.h>
     35
    4036
    4137////////////////////////////////////////
     
    4945        page->forks    = 0;
    5046
    51         spinlock_init( &page->lock );
     47        remote_busylock_init( XPTR( local_cxy , &page->lock ), LOCK_PAGE_STATE );
     48
    5249        list_entry_init( &page->list );
    53     xlist_root_init( XPTR( local_cxy , &page->wait_root ) );
    5450}
    5551
     
    7369{
    7470    return ( (page->flags & value) ? 1 : 0 );
    75 }
    76 
    77 //////////////////////////////////////
    78 bool_t page_do_dirty( page_t * page )
    79 {
    80         bool_t done = false;
    81 
    82         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    83 
    84         // lock the PPM dirty_list
    85         spinlock_lock( &ppm->dirty_lock );
    86 
    87         if( !page_is_flag( page , PG_DIRTY ) )
    88         {
    89                 // set dirty flag in page descriptor
    90                 page_set_flag( page , PG_DIRTY );
    91 
    92                 // register page in PPM dirty list
    93                 list_add_first( &ppm->dirty_root , &page->list );
    94                 done = true;
    95         }
    96 
    97         // unlock the PPM dirty_list
    98         spinlock_unlock( &ppm->dirty_lock );
    99 
    100         return done;
    101 }
    102 
    103 ////////////////////////////////////////
    104 bool_t page_undo_dirty( page_t * page )
    105 {
    106         bool_t done = false;
    107 
    108         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    109 
    110         // lock the dirty_list
    111         spinlock_lock( &ppm->dirty_lock );
    112 
    113         if( page_is_flag( page , PG_DIRTY) )
    114         {
    115                 // clear dirty flag in page descriptor
    116                 page_clear_flag( page , PG_DIRTY );
    117 
    118                 // remove page from PPM dirty list
    119                 list_unlink( &page->list );
    120                 done = true;
    121         }
    122 
    123         // unlock the dirty_list
    124         spinlock_unlock( &ppm->dirty_lock );
    125 
    126         return done;
    127 }
    128 
    129 /////////////////////
    130 void sync_all_pages( void )
    131 {
    132         page_t   * page;
    133         ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    134 
    135         // lock the dirty_list
    136         spinlock_lock( &ppm->dirty_lock );
    137 
    138         while( !list_is_empty( &ppm->dirty_root ) )
    139         {
    140                 page = LIST_FIRST( &ppm->dirty_root ,  page_t , list );
    141 
    142                 // unlock the dirty_list
    143                 spinlock_unlock( &ppm->dirty_lock );
    144 
    145                 // lock the page
    146                 page_lock( page );
    147 
    148                 // sync the page
    149                 vfs_mapper_move_page( page , false );  // from mapper
    150 
    151                 // unlock the page
    152                 page_unlock( page );
    153 
    154                 // lock the dirty_list
    155                 spinlock_lock( &ppm->dirty_lock );
    156         }
    157 
    158         // unlock the dirty_list
    159         spinlock_unlock( &ppm->dirty_lock );
    160 
    161 }
    162 
    163 ///////////////////////////////
    164 void page_lock( page_t * page )
    165 {
    166         // take the spinlock protecting the PG_LOCKED flag
    167         spinlock_lock( &page->lock );
    168 
    169         if( page_is_flag( page , PG_LOCKED ) )  // page is already locked
    170         {
    171                 // get pointer on calling thread
    172                 thread_t * thread = CURRENT_THREAD;
    173 
    174                 // register thread in the page waiting queue
    175                 xlist_add_last( XPTR( local_cxy , &page->wait_root ),
    176                                 XPTR( local_cxy , &thread->wait_list ) );
    177 
    178                 // release the spinlock
    179                 spinlock_unlock( &page->lock );
    180 
    181                 // deschedule the calling thread
    182                 thread_block( XPTR( local_cxy , thread ) , THREAD_BLOCKED_PAGE );
    183                 sched_yield("cannot lock a page");
    184         }
    185         else                                    // page is not locked
    186         {
    187                 // set the PG_LOCKED flag
    188                 page_set_flag( page , PG_LOCKED );
    189 
    190                 // release the spinlock
    191                 spinlock_unlock( &page->lock );
    192         }
    193 }
    194 
    195 /////////////////////////////////
    196 void page_unlock( page_t * page )
    197 {
    198         // take the spinlock protecting the PG_LOCKED flag
    199         spinlock_lock( &page->lock );
    200 
    201         // check the page waiting list
    202         bool_t is_empty = xlist_is_empty( XPTR( local_cxy , &page->wait_root ) );
    203 
    204         if( is_empty == false )    // at least one waiting thread => resume it
    205         {
    206                 // get an extended pointer on the first waiting thread
    207                 xptr_t root_xp   = XPTR( local_cxy , &page->wait_root );
    208                 xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    209 
    210                 // reactivate the first waiting thread
    211                 thread_unblock( thread_xp , THREAD_BLOCKED_PAGE );
    212         }
    213         else                      // no waiting thread => clear the PG_LOCKED flag
    214         {
    215                 page_clear_flag( page , PG_LOCKED );
    216         }
    217 
    218         // release the spinlock
    219         spinlock_unlock( &page->lock );
    22071}
    22172
  • trunk/kernel/mm/page.h

    r486 r567  
    2828#include <kernel_config.h>
    2929#include <hal_kernel_types.h>
    30 #include <spinlock.h>
     30#include <remote_busylock.h>
    3131#include <list.h>
    32 #include <slist.h>
    33 #include <xlist.h>
    3432
    3533/***   Forward declarations   ***/
     
    3937/*************************************************************************************
    4038 * This  defines the flags that can be attached to a physical page.
     39 * TODO : the PG_BUFFER and PG_IO_ERR flags semantic is not defined
    4140 ************************************************************************************/
    4241
     
    4544#define PG_FREE             0x0004     // page can be allocated by PPM
    4645#define PG_INLOAD           0x0008     // on-going load from disk
    47 #define PG_IO_ERR           0x0010     // mapper signals a read/write access error
    48 #define PG_BUFFER           0x0020     // used in blockio.c
     46#define PG_IO_ERR           0x0010     // mapper signals access error    TODO ??? [AG]
     47#define PG_BUFFER           0x0020     // used in blockio.c              TODO ??? [AG]
    4948#define PG_DIRTY            0x0040     // page has been written
    50 #define PG_LOCKED       0x0080     // page is locked
    51 #define PG_COW          0x0100     // page is copy-on-write
     49#define PG_COW          0x0080     // page is copy-on-write
    5250
    5351#define PG_ALL          0xFFFF     // All flags
     
    5553/*************************************************************************************
    5654 * This structure defines a physical page descriptor.
    57  * Size is 64 bytes for a 32 bits core...
    58  * The spinlock is used to test/modify the forks counter.
    59  * TODO : the list of waiting threads seems to be unused [AG]
    60  * TODO : the refcount use has to be clarified
     55 * The busylock is used to test/modify the forks counter.
     56 * NOTE: Size is 44 bytes for a 32 bits core...
     57 * TODO : the refcount use has to be clarified [AG]
    6158 ************************************************************************************/
    6259
     
    6865    uint32_t          index;          /*! page index in mapper                 (4)  */
    6966        list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */
    70     xlist_entry_t     wait_root;      /*! root of list of waiting threads      (16) */
    71         uint32_t          refcount;       /*! reference counter                    (4)  */
     67        uint32_t          refcount;       /*! reference counter TODO ??? [AG]      (4)  */
    7268        uint32_t          forks;          /*! number of pending forks              (4)  */
    73         spinlock_t        lock;           /*! protect the forks field              (4) */
     69        remote_busylock_t lock;           /*! protect all accesses to page         (12) */
    7470}
    7571page_t;
     
    111107
    112108/*************************************************************************************
    113  * This function synchronizes (i.e. update the disk) all dirty pages in a cluster.
    114  * It scans the PPM dirty list, that should be empty when this operation is completed.
    115  ************************************************************************************/
    116 void sync_all_pages( void );
    117 
    118 /*************************************************************************************
    119  * This function sets the PG_DIRTY flag in the page descriptor,
    120  * and registers the page in the dirty list in PPM.
    121  *************************************************************************************
    122  * @ page     : pointer on page descriptor.
    123  * @ returns true if page was not dirty / returns false if page was dirty
    124  ************************************************************************************/
    125 bool_t page_do_dirty( page_t * page );
    126 
    127 /*************************************************************************************
    128  * This function resets the PG_DIRTY flag in the page descriptor,
    129  * and removes the page from the dirty list in PPM.
    130  *************************************************************************************
    131  * @ page     : pointer on page descriptor.
    132  * @ returns true if page was dirty / returns false if page was not dirty
    133  ************************************************************************************/
    134 bool_t page_undo_dirty( page_t * page );
    135 
    136 /*************************************************************************************
    137109 * This function resets to 0 all bytes in a given page.
    138110 *************************************************************************************
     
    140112 ************************************************************************************/
    141113void page_zero( page_t * page );
    142 
    143 /*************************************************************************************
    144  * This blocking function set the PG_LOCKED flag on the page.
    145  * It deschedule if the page has already been locked by another thread,
    146  * and returns only when the flag has been successfully set.
    147  *************************************************************************************
    148  * @ page     : pointer on page descriptor.
    149  ************************************************************************************/
    150 void page_lock( page_t * page );
    151 
    152 /*************************************************************************************
    153  * This blocking function resets the PG_LOCKED flag on the page, if there is no
    154  * other waiting thread. If there is waiting thread(s), it activates the first
    155  * waiting thread without modifying the PG_LOCKED flag.
    156  *************************************************************************************
    157  * @ page     : pointer on page descriptor.
    158  ************************************************************************************/
    159 void page_unlock( page_t * page );
    160114
    161115/*************************************************************************************
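With page_lock()/page_unlock(), the PG_LOCKED flag and the waiting-thread xlist removed, the per-page lock becomes a remote_busylock that any cluster can take through an extended pointer. The vmm.c hunk further down uses it exactly this way to protect the "forks" counter:

    // build extended pointers on the "forks" counter and on its lock
    forks_xp = XPTR( page_cxy , &page_ptr->forks );
    lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    // take lock protecting "forks" counter
    remote_busylock_acquire( lock_xp );

    // increment "forks"
    hal_remote_atomic_add( forks_xp , 1 );

    // release lock protecting "forks" counter
    remote_busylock_release( lock_xp );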
  • trunk/kernel/mm/ppm.c

    r551 r567  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017)
     5 *          Alain Greiner    (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3030#include <bits.h>
    3131#include <page.h>
    32 #include <spinlock.h>
     32#include <busylock.h>
     33#include <queuelock.h>
    3334#include <thread.h>
    3435#include <cluster.h>
    3536#include <kmem.h>
    3637#include <process.h>
    37 #include <dqdt.h>
     38#include <mapper.h>
    3839#include <ppm.h>
     40
     41////////////////////////////////////////////////////////////////////////////////////////
     42//     functions to  translate [ page <-> base <-> ppn ]
     43////////////////////////////////////////////////////////////////////////////////////////
    3944
    4045////////////////////////////////////////////////
     
    4651}
    4752
    48 
    49 
    5053/////////////////////////////////////////////
    5154inline xptr_t ppm_page2base( xptr_t page_xp )
     
    139142
    140143
     144////////////////////////////////////////////////////////////////////////////////////////
     145//     functions to  allocate / release  physical pages
     146////////////////////////////////////////////////////////////////////////////////////////
    141147
    142148///////////////////////////////////////////
     
    204210uint32_t cycle = (uint32_t)hal_get_cycles();
    205211if( DEBUG_PPM_ALLOC_PAGES < cycle )
    206 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    207 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     212printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / cycle %d\n",
     213__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
    208214#endif
    209215
     
    221227
    222228        // take lock protecting free lists
    223         uint32_t       irq_state;
    224         spinlock_lock_busy( &ppm->free_lock, &irq_state );
     229        busylock_acquire( &ppm->free_lock );
    225230
    226231        // find a free block equal or larger to requested size
     
    238243        {
    239244                // release lock protecting free lists
    240                 spinlock_unlock_busy( &ppm->free_lock, irq_state );
     245                busylock_release( &ppm->free_lock );
    241246
    242247#if DEBUG_PPM_ALLOC_PAGES
    243248cycle = (uint32_t)hal_get_cycles();
    244249if( DEBUG_PPM_ALLOC_PAGES < cycle )
    245 printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
    246 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     250printk("\n[DBG] in %s : thread %x in process %x cannot allocate %d page(s) / cycle %d\n",
     251__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );
    247252#endif
    248253
     
    274279
    275280        // release lock protecting free lists
    276         spinlock_unlock_busy( &ppm->free_lock, irq_state );
     281        busylock_release( &ppm->free_lock );
    277282
    278283#if DEBUG_PPM_ALLOC_PAGES
    279284cycle = (uint32_t)hal_get_cycles();
    280285if( DEBUG_PPM_ALLOC_PAGES < cycle )
    281 printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
    282 __FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     286printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn = %x / cycle %d\n",
     287__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     2881<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
    283289#endif
    284290
     
    296302uint32_t cycle = (uint32_t)hal_get_cycles();
    297303if( DEBUG_PPM_FREE_PAGES < cycle )
    298 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    299 __FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
     304printk("\n[DBG] in %s : thread %x in process %x enter for %d page(s) / ppn %x / cycle %d\n",
     305__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     3061<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    300307#endif
    301308
     
    306313
    307314        // get lock protecting free_pages[] array
    308         spinlock_lock( &ppm->free_lock );
     315        busylock_acquire( &ppm->free_lock );
    309316
    310317        ppm_free_pages_nolock( page );
    311318
    312319        // release lock protecting free_pages[] array
    313         spinlock_unlock( &ppm->free_lock );
     320        busylock_release( &ppm->free_lock );
    314321
    315322#if DEBUG_PPM_FREE_PAGES
    316323cycle = (uint32_t)hal_get_cycles();
    317324if( DEBUG_PPM_FREE_PAGES < cycle )
    318 printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
    319 __FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    320 #endif
    321 
    322 }
    323 
    324 ////////////////
     325printk("\n[DBG] in %s : thread %x in process %x exit for %d page(s) / ppn %x / cycle %d\n",
     326__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     3271<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
     328#endif
     329
     330}  // end ppm_free_pages()
     331
     332//////////////////////
    325333void ppm_print( void )
    326334{
     
    332340
    333341        // get lock protecting free lists
    334         spinlock_lock( &ppm->free_lock );
     342        busylock_acquire( &ppm->free_lock );
    335343
    336344        printk("\n***  PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );
     
    351359
    352360        // release lock protecting free lists
    353         spinlock_unlock( &ppm->free_lock );
     361        busylock_release( &ppm->free_lock );
    354362}
    355363
     
    368376                {
    369377                        page = LIST_ELEMENT( iter , page_t , list );
    370 
    371378                        if( page->order != order )  return -1;
    372379                }
     
    376383}
    377384
     385
     386//////////////////////////////////////////////////////////////////////////////////////
     387//     functions to handle  dirty physical pages
     388//////////////////////////////////////////////////////////////////////////////////////
     389
     390/////////////////////////////////////////
     391bool_t ppm_page_do_dirty( page_t * page )
     392{
     393        bool_t done = false;
     394
     395        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     396
     397        // lock the PPM dirty_list
     398        queuelock_acquire( &ppm->dirty_lock );
     399
     400        if( !page_is_flag( page , PG_DIRTY ) )
     401        {
     402                // set dirty flag in page descriptor
     403                page_set_flag( page , PG_DIRTY );
     404
     405                // register page in PPM dirty list
     406                list_add_first( &ppm->dirty_root , &page->list );
     407                done = true;
     408        }
     409
     410        // unlock the PPM dirty_list
     411        queuelock_release( &ppm->dirty_lock );
     412
     413        return done;
     414}
     415
     416///////////////////////////////////////////
     417bool_t ppm_page_undo_dirty( page_t * page )
     418{
     419        bool_t done = false;
     420
     421        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     422
     423        // lock the dirty_list
     424        queuelock_acquire( &ppm->dirty_lock );
     425
     426        if( page_is_flag( page , PG_DIRTY) )
     427        {
     428                // clear dirty flag in page descriptor
     429                page_clear_flag( page , PG_DIRTY );
     430
     431                // remove page from PPM dirty list
     432                list_unlink( &page->list );
     433                done = true;
     434        }
     435
     436        // unlock the dirty_list
     437        queuelock_release( &ppm->dirty_lock );
     438
     439        return done;
     440}
     441
     442///////////////////////////////
     443void ppm_sync_all_pages( void )
     444{
     445        page_t   * page;
     446        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     447
     448        // get the PPM dirty_list lock
     449        queuelock_acquire( &ppm->dirty_lock );
     450
     451        while( !list_is_empty( &ppm->dirty_root ) )
     452        {
     453                page = LIST_FIRST( &ppm->dirty_root ,  page_t , list );
     454
     455                // get the page lock
     456                remote_busylock_acquire( XPTR( local_cxy, &page->lock ) );
     457
     458                // sync the page
     459                vfs_mapper_move_page( page , false );  // from mapper
     460
     461                // release the page lock
     462                remote_busylock_release( XPTR( local_cxy , &page->lock ) );
     463        }
     464
     465        // release the PPM dirty_list lock
     466        queuelock_release( &ppm->dirty_lock );
     467}
     468
  • trunk/kernel/mm/ppm.h

    r486 r567  
    11/*
    2  * ppm.h - Per-cluster Physical Pages Manager Interface
     2 * ppm.h - Per-cluster Physical Pages Manager definition.
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     5 *          Alain Greiner    (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    2828#include <hal_kernel_types.h>
    2929#include <list.h>
    30 #include <spinlock.h>
     30#include <busylock.h>
     31#include <queuelock.h>
    3132#include <boot_info.h>
    3233#include <page.h>
     
    3637 * This structure defines the Physical Pages Manager in a cluster.
    3738 * In each cluster, the physical memory bank starts at local physical address 0 and
    38  * contains an integer number of pages, is defined by the <pages_nr> field in the
     39 * contains an integer number of pages, defined by the <pages_nr> field in the
    3940 * boot_info structure. It is split in three parts:
    4041 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
     
    4344 * - the "pages_tbl" section contains the physical page descriptors array. It starts
    4445 *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
    45  *   It is created and initialized by the hal_ppm_create() function. "the
     46 *   It is created and initialized by the hal_ppm_create() function.
    4647 * - The "kernel_heap" section contains all physical pages that are are not in the
    47  *   in the kernel_code and pages_tbl sections, and that have not been reserved by the
     48 *   kernel_code and pages_tbl sections, and that have not been reserved by the
    4849 *   architecture specific bootloader. The reserved pages are defined in the boot_info
    4950 *   structure.
    5051 *
    5152 * The main service provided by the PMM is the dynamic allocation of physical pages
    52  * from the "kernel_heap" section.
    53  * This low-level allocator implements the buddy algorithm: an allocated block is
    54  * an integer number n of 4 small pages, and n (called order) is a power of 2.
     53 * from the "kernel_heap" section. This low-level allocator implements the buddy
     54 * algorithm: an allocated block is an integer number n of small pages, where n
 55 * is a power of 2, and log2(n) is called the order.
     56 * This allocator being shared by the local threads, the free_page lists rooted
     57 * in the PPM descriptor are protected by a local busylock, because it is used
     58 * by the idle_thread during kernel_init().
     59 *
     60 * Another service is to register the dirty pages in a specific dirty_list, that is
     61 * also rooted in the PPM, in order to be able to save all dirty pages on disk.
     62 * This dirty list is protected by a specific local queuelock.
    5563 ****************************************************************************************/
    5664
    5765typedef struct ppm_s
    5866{
    59         spinlock_t     free_lock;               /*! lock protecting free_pages[] lists      */
     67        busylock_t     free_lock;               /*! lock protecting free_pages[] lists      */
    6068        list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists      */
    6169        uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages    */
    6270        page_t       * pages_tbl;               /*! pointer on page descriptors array       */
    6371        uint32_t       pages_nr;                /*! total number of small physical page     */
    64     spinlock_t     dirty_lock;              /*! lock protecting the dirty pages list    */
     72    queuelock_t    dirty_lock;              /*! lock protecting dirty pages list        */
    6573    list_entry_t   dirty_root;              /*! root of dirty pages list                */
    6674    void         * vaddr_base;              /*! pointer on local physical memory base   */
     
    6876ppm_t;
    6977
     78/************** functions to allocate / release physical pages  *************************/
     79
    7080/*****************************************************************************************
    7181 * This is the low-level physical pages allocation function.
     
    107117
    108118
     119/************** functions to translate [ page <->  base <-> ppn ] ***********************/
    109120
    110121/*****************************************************************************************
     
    175186error_t ppm_assert_order( ppm_t * ppm );
    176187
     188
     189/*********** functions to handle dirty pages  *******************************************/
     190
     191/*****************************************************************************************
     192 * This function registers a physical page as dirty.
     193 * - it takes the queuelock protecting the PPM dirty_list.
     194 * - it tests the PG_DIRTY flag in the page descriptor.
     195 *   . if page already dirty => do nothing
     196 *   . if page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
     197 * - it releases the queuelock protecting the PPM dirty_list.
     198 *****************************************************************************************
     199 * @ page     : pointer on page descriptor.
     200 * @ returns true if page was not dirty / returns false if page was dirty
     201 ****************************************************************************************/
     202bool_t ppm_page_do_dirty( page_t * page );
     203
     204/*****************************************************************************************
     205 * This function unregisters a physical page as dirty.
     206 * - it takes the queuelock protecting the PPM dirty_list.
     207 * - it tests the PG_DIRTY flag in the page descriptor.
     208 *   . if page not dirty => do nothing
     209 *   . if page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
     210 * - it releases the queuelock protecting the PPM dirty_list.
     211 *****************************************************************************************
     212 * @ page     : pointer on page descriptor.
     213 * @ returns true if page was dirty / returns false if page was not dirty
     214 ****************************************************************************************/
     215bool_t ppm_page_undo_dirty( page_t * page );
     216
     217/*****************************************************************************************
     218 * This function synchronizes (i.e. update the disk) all dirty pages in a cluster.
     219 * - it takes the queuelock protecting the PPM dirty_list.
     220 * - it scans the PPM dirty list, and for each page:
     221 *   . it takes the lock protecting the page.
     222 *   . it removes the page from the PPM dirty_list.
     223 *   . it resets the PG_DIRTY flag.
     224 *   . it releases the lock protecting the page.
     225 * - it releases the queuelock protecting the PPM dirty_list.
     226 * The PPM dirty_list is empty when the sync operation completes.
     227 ****************************************************************************************/
     228void ppm_sync_all_pages( void );
     229
    177230#endif  /* _PPM_H_ */
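The allocation interface itself is unchanged: a request of order o returns a block of 2^o contiguous small pages, taken from the free_pages_root[] lists under the free_lock busylock. A hypothetical usage fragment (types and signatures of ppm_alloc_pages() / ppm_free_pages() are assumed from the debug code and comments in the ppm.c hunk above):

    // allocate a block of 2^2 = 4 contiguous small pages in the local cluster
    page_t * block = ppm_alloc_pages( 2 );

    if( block != NULL )
    {
        // translate the page descriptor to a PPN and to a base address
        uint32_t ppn     = ppm_page2ppn(  XPTR( local_cxy , block ) );
        xptr_t   base_xp = ppm_page2base( XPTR( local_cxy , block ) );

        // ... use the pages ...

        // release the block to the PPM
        ppm_free_pages( block );
    }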
  • trunk/kernel/mm/vmm.c

    r561 r567  
    3131#include <printk.h>
    3232#include <memcpy.h>
    33 #include <rwlock.h>
     33#include <remote_rwlock.h>
     34#include <remote_queuelock.h>
    3435#include <list.h>
    3536#include <xlist.h>
     
    5152//////////////////////////////////////////////////////////////////////////////////
    5253
    53 extern  process_t  process_zero;   // defined in cluster.c file
    54 
     54extern  process_t  process_zero;      // allocated in cluster.c
    5555
    5656///////////////////////////////////////
     
    6565
    6666#if DEBUG_VMM_INIT
     67thread_t * this = CURRENT_THREAD;
    6768uint32_t cycle = (uint32_t)hal_get_cycles();
    6869if( DEBUG_VMM_INIT )
    69 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    70 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     70printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n",
     71__FUNCTION__ , this->trdid, this->process->pid, process->pid , cycle );
    7172#endif
    7273
     
    7778    vmm->vsegs_nr = 0;
    7879        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    79         remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) );
    80 
    81     assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
    82             <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
    83 
    84     assert( (CONFIG_THREAD_MAX_PER_CLUSTER <= 32) ,
    85             "no more than 32 threads per cluster for a single process\n");
    86 
    87     assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) <=
    88              (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    89              "STACK zone too small\n");
     80        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ),LOCK_VMM_VSL );
     81
     82assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
     83<= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
     84
     85assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
     86"no more than 32 threads per cluster for a single process\n");
     87
     88assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
     89(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
     90"STACK zone too small\n");
    9091
    9192    // register kentry vseg in VSL
     
    171172    vmm->stack_mgr.bitmap   = 0;
    172173    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
    173     spinlock_init( &vmm->stack_mgr.lock );
     174    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );
    174175
    175176    // initialize MMAP allocator
     
    177178    vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    178179    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    179     spinlock_init( &vmm->mmap_mgr.lock );
     180    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    180181
    181182    uint32_t i;
     
    190191cycle = (uint32_t)hal_get_cycles();
    191192if( DEBUG_VMM_INIT )
    192 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
    193 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     193printk("\n[DBG] %s : thread %x in process %x exit / process %x / entry_point = %x / cycle %d\n",
     194__FUNCTION__, this->trdid, this->process->pid, process->pid , process->vmm.entry_point , cycle );
    194195#endif
    195196
     
    209210
    210211    // get lock protecting the vseg list
    211     remote_rwlock_rd_lock( XPTR( local_cxy , &vmm->vsegs_lock ) );
     212    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
    212213
    213214    // scan the list of vsegs
     
    243244
    244245    // release the lock
    245     remote_rwlock_rd_unlock( XPTR( local_cxy , &vmm->vsegs_lock ) );
     246    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
    246247
    247248}  // vmm_display()
     249
     250///////////////////////////////////
     251void vmm_vseg_attach( vmm_t  * vmm,
     252                      vseg_t * vseg )
     253{
     254    // build extended pointer on rwlock protecting VSL
     255    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     256
     257    // get rwlock in write mode
     258    remote_rwlock_wr_acquire( lock_xp );
     259
     260    // update vseg descriptor
     261    vseg->vmm = vmm;
     262
     263    // add vseg in vmm list
     264    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
     265                    XPTR( local_cxy , &vseg->xlist ) );
     266
     267    // release rwlock in write mode
     268    remote_rwlock_wr_release( lock_xp );
     269}
     270
     271///////////////////////////////////
     272void vmm_vseg_detach( vmm_t  * vmm,
     273                      vseg_t * vseg )
     274{
     275    // build extended pointer on rwlock protecting VSL
     276    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     277
     278    // get rwlock in write mode
     279    remote_rwlock_wr_acquire( lock_xp );
     280
     281    // update vseg descriptor
     282    vseg->vmm = NULL;
     283
     284    // remove vseg from vmm list
     285    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     286
     287    // release rwlock in write mode
     288    remote_rwlock_wr_release( lock_xp );
     289}
    248290
    249291/////////////////////i//////////////////////////
     
    274316#endif
    275317
    276     // check cluster is reference
    277     assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
    278     "not called in reference cluster\n");
     318// check cluster is reference
     319assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
     320"not called in reference cluster\n");
    279321
    280322    // get extended pointer on root of process copies xlist in owner cluster
     
    346388#endif
    347389
    348     // check cluster is reference
    349     assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
    350     "local cluster is not process reference cluster\n");
     390// check cluster is reference
     391assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
     392"local cluster is not process reference cluster\n");
    351393
    352394    // get pointer on reference VMM
     
    387429            vseg     = GET_PTR( vseg_xp );
    388430
    389             assert( (GET_CXY( vseg_xp ) == local_cxy) ,
    390             "all vsegs in reference VSL must be local\n" );
     431assert( (GET_CXY( vseg_xp ) == local_cxy) ,
     432"all vsegs in reference VSL must be local\n" );
    391433
    392434            // get vseg type, base and size
     
    444486                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    445487
     488                            // take lock protecting "forks" counter
     489                            remote_busylock_acquire( lock_xp );
     490
    446491                            // increment "forks"
    447                             remote_spinlock_lock( lock_xp );
    448492                            hal_remote_atomic_add( forks_xp , 1 );
    449                             remote_spinlock_unlock( lock_xp );
     493
     494                            // release lock protecting "forks" counter
     495                            remote_busylock_release( lock_xp );
    450496                        }
    451497                    }   // end loop on vpn
     
    511557
    512558    // initialize the lock protecting the child VSL
    513     remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ) );
     559    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
    514560
    515561    // initialize the child VSL as empty
     
    529575    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
    530576
    531     // take the lock protecting the parent VSL
    532     remote_rwlock_rd_lock( parent_lock_xp );
     577    // take the lock protecting the parent VSL in read mode
     578    remote_rwlock_rd_acquire( parent_lock_xp );
    533579
    534580    // loop on parent VSL xlist
     
    540586
    541587        // get vseg type
    542         type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
     588        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );
    543589       
    544590#if DEBUG_VMM_FORK_COPY
     
    547593printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
    548594__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
    549 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     595hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
    550596#endif
    551597
     
    566612
    567613            // register child vseg in child VSL
    568             vseg_attach( child_vmm , child_vseg );
     614            vmm_vseg_attach( child_vmm , child_vseg );
    569615
    570616#if DEBUG_VMM_FORK_COPY
     
    573619printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
    574620__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
    575 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     621hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
    576622#endif
    577623
     
    613659                        lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    614660
     661                        // get lock protecting "forks" counter
     662                        remote_busylock_acquire( lock_xp );
     663
    615664                        // increment "forks"
    616                         remote_spinlock_lock( lock_xp );
    617665                        hal_remote_atomic_add( forks_xp , 1 );
    618                         remote_spinlock_unlock( lock_xp );
     666
     667                        // release lock protecting "forks" counter
     668                        remote_busylock_release( lock_xp );
    619669
    620670#if DEBUG_VMM_FORK_COPY
     
    630680    }   // end loop on vsegs
    631681
    632     // release the parent vsegs lock
    633     remote_rwlock_rd_unlock( parent_lock_xp );
     682    // release the parent VSL lock in read mode
     683    remote_rwlock_rd_release( parent_lock_xp );
    634684
    635685    // initialize child GPT (architecture specic)
     
    703753    // get extended pointer on VSL root and VSL lock
    704754    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    705         xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    706 
    707     // get lock protecting vseg list
    708         remote_rwlock_wr_lock( lock_xp );
    709755
    710756    // remove all user vsegs registered in VSL
     
    712758        {
    713759        // get pointer on first vseg in VSL
    714                 vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
     760                vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
    715761        vseg    = GET_PTR( vseg_xp );
    716762
     
    719765
    720766        // remove vseg from VSL
    721                 vseg_detach( vseg );
     767                vmm_vseg_detach( vmm , vseg );
    722768
    723769        // release memory allocated to vseg descriptor
     
    732778        }
    733779
    734     // release lock protecting VSL
    735         remote_rwlock_wr_unlock( lock_xp );
    736 
    737780    // remove all vsegs from zombi_lists in MMAP allocator
    738781    uint32_t i;
     
    748791__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    749792#endif
    750                     vseg_detach( vseg );
     793                    vmm_vseg_detach( vmm , vseg );
    751794            vseg_free( vseg );
    752795
     
    812855
    813856    // get lock on stack allocator
    814     spinlock_lock( &mgr->lock );
     857    busylock_acquire( &mgr->lock );
    815858
    816859    // get first free slot index in bitmap
     
    818861    if( (index < 0) || (index > 31) )
    819862    {
    820         spinlock_unlock( &mgr->lock );
    821         return ENOMEM;
     863        busylock_release( &mgr->lock );
     864        return 0xFFFFFFFF;
    822865    }
    823866
     
    826869
    827870    // release lock on stack allocator
    828     spinlock_unlock( &mgr->lock );
     871    busylock_release( &mgr->lock );
    829872
    830873    // returns vpn_base, vpn_size (one page non allocated)
     
    864907
    865908    // get lock on mmap allocator
    866     spinlock_lock( &mgr->lock );
     909    busylock_acquire( &mgr->lock );
    867910
    868911    // get vseg from zombi_list or from mmap zone
     
    892935
    893936    // release lock on mmap allocator
    894     spinlock_unlock( &mgr->lock );
     937    busylock_release( &mgr->lock );
    895938
    896939    // returns vpn_base, vpn_size
     
    10021045
    10031046    // attach vseg to VSL
    1004     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    1005         remote_rwlock_wr_lock( lock_xp );
    1006         vseg_attach( vmm , vseg );
    1007         remote_rwlock_wr_unlock( lock_xp );
     1047        vmm_vseg_attach( vmm , vseg );
    10081048
    10091049#if DEBUG_VMM_CREATE_VSEG
     
    10271067
    10281068    // detach vseg from VSL
    1029     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    1030         remote_rwlock_wr_lock( lock_xp );
    1031         vseg_detach( vseg );
    1032         remote_rwlock_wr_unlock( lock_xp );
     1069        vmm_vseg_detach( vmm , vseg );
    10331070
    10341071    // release the stack slot to VMM stack allocator if STACK type
     
    10421079
    10431080        // update stacks_bitmap
    1044         spinlock_lock( &mgr->lock );
     1081        busylock_acquire( &mgr->lock );
    10451082        bitmap_clear( &mgr->bitmap , index );
    1046         spinlock_unlock( &mgr->lock );
     1083        busylock_release( &mgr->lock );
    10471084    }
    10481085
     
    10571094
    10581095        // update zombi_list
    1059         spinlock_lock( &mgr->lock );
     1096        busylock_acquire( &mgr->lock );
    10601097        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
    1061         spinlock_unlock( &mgr->lock );
     1098        busylock_release( &mgr->lock );
    10621099    }
    10631100
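
    The symmetric release path keeps recycled MMAP vsegs in per-size zombi lists under the same
    busylock, so a later allocation of the same size can reuse the descriptor instead of calling
    the allocator again. A sketch of that path; the computation of the list index from the vseg
    size (here bits_log2()) is an assumption:

        // sketch only : the index computation is an assumption
        mmap_mgr_t * mgr   = &vmm->mmap_mgr;
        uint32_t     index = bits_log2( vseg->vpn_size );

        // update zombi_list under the MMAP allocator busylock
        busylock_acquire( &mgr->lock );
        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
        busylock_release( &mgr->lock );
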
     
    11121149#endif
    11131150
    1114             // check small page
    1115             assert( (attr & GPT_SMALL) ,
     1116             "a user vseg must use small pages" );
     1151// check small page
     1152assert( (attr & GPT_SMALL) ,
      1153"a user vseg must use small pages" );
    11171154
    11181155            // unmap GPT entry in all GPT copies
     
    11211158            // handle pending forks counter if
    11221159            // 1) not identity mapped
    1123             // 2) running in reference cluster
     1160            // 2) reference cluster
    11241161            if( ((vseg->flags & VSEG_IDENT)  == 0) &&
    11251162                (GET_CXY( process->ref_xp ) == local_cxy) )
     
    11341171                lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    11351172
    1136                 // get lock protecting page descriptor
    1137                 remote_spinlock_lock( lock_xp );
    1138 
    11391173                // get pending forks counter
    1140                 forks = hal_remote_lw( forks_xp );
     1174                forks = hal_remote_l32( forks_xp );
    11411175               
    11421176                if( forks )  // decrement pending forks counter
     
    11571191                    }
    11581192                }
    1159 
    1160                 // release lock protecting page descriptor
    1161                 remote_spinlock_unlock( lock_xp );
    11621193            }
    11631194        }
     
    11941225
    11951226    // get lock protecting the VSL
    1196     remote_rwlock_rd_lock( lock_xp );
     1227    remote_rwlock_rd_acquire( lock_xp );
    11971228
    11981229    // scan the list of vsegs in VSL
     
    12041235        {
    12051236            // return success
    1206             remote_rwlock_rd_unlock( lock_xp );
     1237            remote_rwlock_rd_release( lock_xp );
    12071238            return vseg;
    12081239        }
     
    12101241
    12111242    // return failure
    1212     remote_rwlock_rd_unlock( lock_xp );
     1243    remote_rwlock_rd_release( lock_xp );
    12131244    return NULL;
    12141245
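
    The lookup shown above is the canonical read-side use of the renamed lock API: take the VSL
    lock in read mode, scan the list, release before returning. A sketch of the complete function;
    the function name, the XLIST_FOREACH iterator, the XLIST_ELEMENT accessor and the min/max
    containment test are assumptions, not the actual source:

        // sketch only, not the actual source
        vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm , intptr_t vaddr )
        {
            xptr_t iter_xp;
            xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
            xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

            // get lock protecting the VSL in read mode
            remote_rwlock_rd_acquire( lock_xp );

            // scan the list of vsegs in VSL
            XLIST_FOREACH( root_xp , iter_xp )
            {
                vseg_t * vseg = GET_PTR( XLIST_ELEMENT( iter_xp , vseg_t , xlist ) );  // accessor assumed

                if( (vaddr >= vseg->min) && (vaddr < vseg->max) )   // hit
                {
                    remote_rwlock_rd_release( lock_xp );
                    return vseg;
                }
            }

            // return failure
            remote_rwlock_rd_release( lock_xp );
            return NULL;
        }
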
     
    12401271
    12411272    // get lock protecting VSL
    1242         remote_rwlock_wr_lock( lock_xp );
     1273        remote_rwlock_wr_acquire( lock_xp );
    12431274
    12441275        if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
     
    13011332
    13021333    // release VMM lock
    1303         remote_rwlock_wr_unlock( lock_xp );
     1334        remote_rwlock_wr_release( lock_xp );
    13041335
    13051336        return error;
     
    13481379
    13491380        // register local vseg in local VMM
    1350         vseg_attach( &process->vmm , vseg );
     1381        vmm_vseg_attach( &process->vmm , vseg );
    13511382    }   
    13521383   
     
    13811412    uint32_t     flags = vseg->flags;
    13821413
    1383     assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
     1414// check vseg type
     1415assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
    13841416
    13851417    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
     
    14001432        }
    14011433        page_cxy = ( x << y_width ) + y;
     1434
     1435//      if ( LOCAL_CLUSTER->valid[x][y] == false ) page_cxy = cluster_random_select();
     1436
    14021437    }
    14031438    else                          // other cases => cxy specified in vseg
     
    14571492        xptr_t mapper_xp = vseg->mapper_xp;
    14581493
    1459         assert( (mapper_xp != XPTR_NULL),
    1460         "mapper not defined for a FILE vseg\n" );
     1494assert( (mapper_xp != XPTR_NULL),
     1495"mapper not defined for a FILE vseg\n" );
    14611496       
    14621497        // get mapper cluster and local pointer
     
    14951530            xptr_t     mapper_xp = vseg->mapper_xp;
    14961531
    1497             assert( (mapper_xp != XPTR_NULL),
    1498             "mapper not defined for a CODE or DATA vseg\n" );
     1532assert( (mapper_xp != XPTR_NULL),
     1533"mapper not defined for a CODE or DATA vseg\n" );
    14991534       
    15001535            // get mapper cluster and local pointer
     
    15131548__FUNCTION__, this, vpn, elf_offset );
    15141549#endif
    1515 
    1516 
    15171550            // compute extended pointer on page base
    15181551            xptr_t base_xp  = ppm_page2base( page_xp );
     
    15291562__FUNCTION__, this, vpn );
    15301563#endif
    1531 
    1532 
    15331564                if( GET_CXY( page_xp ) == local_cxy )
    15341565                {
     
    16461677    error_t    error;
    16471678
    1648     thread_t * this  = CURRENT_THREAD;
    16491679
    16501680#if DEBUG_VMM_GET_PTE
     1681thread_t * this  = CURRENT_THREAD;
    16511682uint32_t   cycle = (uint32_t)hal_get_cycles();
    16521683if( DEBUG_VMM_GET_PTE < cycle )
     
    16631694                           &vseg );
    16641695
    1665     // vseg has been checked by the vmm_handle_page_fault() function
    1666     assert( (vseg != NULL) , "vseg undefined / vpn %x\n");
     1696// vseg has been checked by the vmm_handle_page_fault() function
     1697assert( (vseg != NULL) , "vseg undefined / vpn %x\n");
    16671698
    16681699    if( cow )  //////////////// copy_on_write request //////////////////////
     
    16751706        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    16761707
    1677         assert( (old_attr & GPT_MAPPED),
    1678           "PTE unmapped for a COW exception / vpn %x\n" );
     1708assert( (old_attr & GPT_MAPPED),
     1709"PTE unmapped for a COW exception / vpn %x\n" );
    16791710
    16801711#if( DEBUG_VMM_GET_PTE & 1 )
     
    16931724        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    16941725
    1695         // take lock protecting page descriptor
    1696         remote_spinlock_lock( lock_xp );
     1726        // take lock protecting "forks" counter
     1727        remote_busylock_acquire( lock_xp );
    16971728
    16981729        // get number of pending forks in page descriptor
    1699         uint32_t forks = hal_remote_lw( forks_xp );
     1730        uint32_t forks = hal_remote_l32( forks_xp );
    17001731
    17011732        if( forks )        // pending fork => allocate a new page, copy old to new
     
    17281759        }
    17291760
    1730         // release lock protecting page descriptor
    1731         remote_spinlock_unlock( lock_xp );
     1761        // release lock protecting "forks" counter
     1762        remote_busylock_release( lock_xp );
    17321763
    17331764        // build new_attr : reset COW and set WRITABLE,
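
    The copy-on-write path now serializes on the remote busylock that protects the "forks"
    counter in the page descriptor: if other processes still reference the physical page, a new
    page is allocated and the old content copied before the counter is decremented. A condensed
    sketch of that critical section; the allocation and copy helpers named here are stand-ins,
    not necessarily the ones the real code uses:

        // sketch only : ppm_alloc_pages() placement and hal_remote_memcpy() usage are assumptions
        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

        // take lock protecting "forks" counter
        remote_busylock_acquire( lock_xp );

        // get number of pending forks in page descriptor
        uint32_t forks = hal_remote_l32( forks_xp );

        if( forks )        // pending fork => allocate a new page, copy old to new
        {
            page_t * new_ptr = ppm_alloc_pages( 0 );               // assumed local allocation
            xptr_t   new_xp  = XPTR( local_cxy , new_ptr );

            hal_remote_memcpy( ppm_page2base( new_xp ),
                               ppm_page2base( XPTR( page_cxy , page_ptr ) ),
                               CONFIG_PPM_PAGE_SIZE );

            // one pending fork has been resolved
            hal_remote_atomic_add( forks_xp , -1 );
        }

        // release lock protecting "forks" counter
        remote_busylock_release( lock_xp );
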
     
    18401871    type = vseg->type;
    18411872
    1842     // get reference process cluster and local pointer
     1873    // get relevant process cluster and local pointer
    18431874    // for private vsegs (CODE and DATA type),
    1844     // the reference is the local process descriptor.
     1875    // the relevant process descriptor is local.
    18451876    if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
    18461877    {
  • trunk/kernel/mm/vmm.h

    r469 r567  
    3030#include <bits.h>
    3131#include <list.h>
    32 #include <spinlock.h>
     32#include <queuelock.h>
    3333#include <hal_gpt.h>
    3434#include <vseg.h>
     
    5454typedef struct stack_mgr_s
    5555{
    56     spinlock_t     lock;               /*! lock protecting STACK allocator                  */
     56    busylock_t     lock;               /*! lock protecting STACK allocator                  */
    5757    vpn_t          vpn_base;           /*! first page of STACK zone                         */
     5858    bitmap_t       bitmap;             /*! bit vector of allocated stacks                   */
     
    7979typedef struct mmap_mgr_s
    8080{
    81     spinlock_t     lock;               /*! lock protecting MMAP allocator                   */
     81    busylock_t     lock;               /*! lock protecting MMAP allocator                   */
    8282    vpn_t          vpn_base;           /*! first page of MMAP zone                          */
    8383    vpn_t          vpn_size;           /*! number of pages in MMAP zone                     */
     
    9090 * This structure defines the Virtual Memory Manager for a given process in a given cluster.
    9191 * This local VMM provides four main services:
    92  * 1) It registers all vsegs in the local copy of the vseg list (VSL).
    93  * 2) It contains the local copy of the generic page table (GPT).
      92 * 1) It contains the local copy of the vseg list (VSL), complete only in the reference cluster.
      93 * 2) It contains the local copy of the generic page table (GPT), complete only in the reference cluster.
    9494 * 3) The stack manager dynamically allocates virtual memory space for the STACK vsegs.
    9595 * 4) The mmap manager dynamically allocates virtual memory for the (FILE/ANON/REMOTE) vsegs.
     
    105105typedef struct vmm_s
    106106{
    107         remote_rwlock_t  vsegs_lock;         /*! lock protecting the vsegs list                 */
     107        remote_rwlock_t  vsegs_lock;         /*! lock protecting the local VSL                  */
    108108        xlist_entry_t    vsegs_root;         /*! VSL root (VSL only complete in reference)      */
    109109        uint32_t         vsegs_nr;           /*! total number of local vsegs                    */
     
    153153void vmm_display( struct process_s * process,
    154154                  bool_t             mapping );
     155
     156/*******************************************************************************************
      157 * This function adds a vseg descriptor to the VSL of a given VMM,
      158 * and updates the vmm field in the vseg descriptor.
      159 * It takes the lock protecting the VSL.
     160 *******************************************************************************************
     161 * @ vmm       : pointer on the VMM
     162 * @ vseg      : pointer on the vseg descriptor
     163 ******************************************************************************************/
     164void vmm_vseg_attach( struct vmm_s  * vmm,
     165                      vseg_t        * vseg );
     166
     167/*******************************************************************************************
     168 * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
     169 * and updates the vmm field in the vseg descriptor. No memory is released.
      170 * It takes the lock protecting the VSL.
     171 *******************************************************************************************
     172 * @ vmm       : pointer on the VMM
     173 * @ vseg      : pointer on the vseg descriptor
     174 ******************************************************************************************/
     175void vmm_vseg_detach( struct vmm_s  * vmm,
     176                      vseg_t        * vseg );
    155177
    156178/*********************************************************************************************
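
    Judging from the vseg_attach() / vseg_detach() bodies removed from vseg.c further down in
    this changeset, the new vmm_* variants most likely wrap the same list manipulation in the VSL
    lock, which is why the callers shown earlier no longer take it themselves. A sketch of the
    attach side only, not the actual source (the vsegs_nr bookkeeping is an assumption):

        // sketch only, not the actual source
        void vmm_vseg_attach( vmm_t  * vmm,
                              vseg_t * vseg )
        {
            xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

            // take the lock protecting the VSL
            remote_rwlock_wr_acquire( lock_xp );

            // update vseg descriptor and add vseg in VSL
            vseg->vmm = vmm;
            xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                            XPTR( local_cxy , &vseg->xlist ) );
            vmm->vsegs_nr++;                   // bookkeeping assumed

            // release the lock protecting the VSL
            remote_rwlock_wr_release( lock_xp );
        }

    vmm_vseg_detach() is presumably the mirror image: take the lock, reset vseg->vmm,
    xlist_unlink() the entry, then release.
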
  • trunk/kernel/mm/vseg.c

    r503 r567  
    3535#include <ppm.h>
    3636#include <mapper.h>
    37 #include <spinlock.h>
    3837#include <vfs.h>
    3938#include <page.h>
     
    159158
    160159    // initialize vseg with remote_read access
    161     vseg->type        =           hal_remote_lw ( XPTR( cxy , &ptr->type        ) );
     160    vseg->type        =           hal_remote_l32 ( XPTR( cxy , &ptr->type        ) );
    162161    vseg->min         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min         ) );
    163162    vseg->max         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max         ) );
    164     vseg->vpn_base    =           hal_remote_lw ( XPTR( cxy , &ptr->vpn_base    ) );
    165     vseg->vpn_size    =           hal_remote_lw ( XPTR( cxy , &ptr->vpn_size    ) );
    166     vseg->flags       =           hal_remote_lw ( XPTR( cxy , &ptr->flags       ) );
    167     vseg->file_offset =           hal_remote_lw ( XPTR( cxy , &ptr->file_offset ) );
    168     vseg->file_size   =           hal_remote_lw ( XPTR( cxy , &ptr->file_size   ) );
    169         vseg->mapper_xp   = (xptr_t)  hal_remote_lwd( XPTR( cxy , &ptr->mapper_xp   ) );
     163    vseg->vpn_base    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_base    ) );
     164    vseg->vpn_size    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_size    ) );
     165    vseg->flags       =           hal_remote_l32 ( XPTR( cxy , &ptr->flags       ) );
     166    vseg->file_offset =           hal_remote_l32 ( XPTR( cxy , &ptr->file_offset ) );
     167    vseg->file_size   =           hal_remote_l32 ( XPTR( cxy , &ptr->file_size   ) );
     168        vseg->mapper_xp   = (xptr_t)  hal_remote_l64( XPTR( cxy , &ptr->mapper_xp   ) );
    170169
    171170    switch (vseg->type)
     
    186185        case VSEG_TYPE_REMOTE:
    187186        {
    188             vseg->cxy = (cxy_t) hal_remote_lw( XPTR(cxy, &ptr->cxy) );
     187            vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) );
    189188            break;
    190189        }
     
    197196}
    198197
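
    The accessor renaming above (hal_remote_lw to hal_remote_l32, hal_remote_lwd to
    hal_remote_l64) makes the access width explicit: l32 reads a 32-bit word and l64 a 64-bit
    word through an extended pointer. A minimal usage example, reusing the field names from the
    copy function above:

        // read a 32-bit field and a 64-bit field of a remote vseg descriptor
        uint32_t flags     =          hal_remote_l32( XPTR( cxy , &ptr->flags     ) );
        xptr_t   mapper_xp = (xptr_t) hal_remote_l64( XPTR( cxy , &ptr->mapper_xp ) );
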
    199 ///////////////////////////////
    200 void vseg_attach( vmm_t  * vmm,
    201                   vseg_t * vseg )
    202 {
    203     // update vseg descriptor
    204     vseg->vmm = vmm;
    205198
    206     // add vseg in vmm list
    207     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
    208                     XPTR( local_cxy , &vseg->xlist ) );
    209 }
    210 
    211 /////////////////////////////////
    212 void vseg_detach( vseg_t * vseg )
    213 {
    214     // update vseg descriptor
    215     vseg->vmm = NULL;
    216 
    217     // remove vseg from vmm list
    218     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    219 }
    220 
  • trunk/kernel/mm/vseg.h

    r503 r567  
    2828
    2929#include <hal_kernel_types.h>
    30 #include <spinlock.h>
    3130#include <vfs.h>
    3231
     
    145144                         xptr_t   ref_xp );
    146145
    147 /*******************************************************************************************
    148  * This function adds a vseg descriptor in the set of vsegs controlled by a given VMM,
    149  * and updates the vmm field in the vseg descriptor.
    150  * The lock protecting the vsegs list in VMM must be taken by the caller.
    151  *******************************************************************************************
    152  * @ vmm       : pointer on the VMM
    153  * @ vseg      : pointer on the vseg descriptor
    154  ******************************************************************************************/
    155 void vseg_attach( struct vmm_s  * vmm,
    156                   vseg_t        * vseg );
    157 
    158 /*******************************************************************************************
    159  * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
    160  * and updates the vmm field in the vseg descriptor. No memory is released.
    161  * The lock protecting the vsegs list in VMM must be taken by the caller.
    162  *******************************************************************************************
    163  * @ vseg      : pointer on the vseg descriptor
    164  ******************************************************************************************/
    165 void vseg_detach( vseg_t        * vseg );
    166 
    167146
    168147#endif /* _VSEG_H_ */