Changeset 23 for trunk/kernel/mm


Ignore:
Timestamp:
Jun 18, 2017, 10:06:41 PM (4 years ago)
Author:
alain
Message:

Introduce syscalls.

Location:
trunk/kernel/mm
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.h

    r18 r23  
    7070typedef struct kcm_page_s
    7171{
    72         BITMAP          ( bitmap , CONFIG_KCM_BLOCKS_MAX );
     72        uint32_t        bitmap[BITMAP_SIZE(CONFIG_KCM_BLOCKS_MAX)];
    7373        uint8_t       * base;                  /*! pointer on first block in page          */
    7474        kcm_t         * kcm;                   /*! owner KCM allocator                     */
  • trunk/kernel/mm/kmem.c

    r18 r23  
    4444#include <remote_sem.h>
    4545#include <remote_barrier.h>
     46#include <remote_mutex.h>
     47#include <remote_condvar.h>
    4648#include <mapper.h>
    4749#include <grdxt.h>
     
    8991    else if( type == KMEM_CPU_CTX )       return sizeof( hal_cpu_context_t );
    9092    else if( type == KMEM_FPU_CTX )       return sizeof( hal_fpu_context_t );
     93    else if( type == KMEM_BARRIER )       return sizeof( remote_barrier_t );
    9194
    9295    else if( type == KMEM_FATFS_INODE )   return sizeof( fatfs_inode_t );
    9396    else if( type == KMEM_FATFS_CTX )     return sizeof( fatfs_ctx_t );
    94     else if( type == KMEM_RAMFS_INODE )   return sizeof( ramfs_inode_t );
    95     else if( type == KMEM_RAMFS_CTX )     return sizeof( ramfs_ctx_t );
     97    else if( type == KMEM_DEVFS_INODE )   return sizeof( devfs_inode_t );
     98    else if( type == KMEM_MUTEX )         return sizeof( remote_mutex_t );
    9699    else if( type == KMEM_VFS_CTX )       return sizeof( vfs_ctx_t );
    97100    else if( type == KMEM_VFS_INODE )     return sizeof( vfs_inode_t );
     
    99102    else if( type == KMEM_VFS_FILE )      return sizeof( vfs_file_t );
    100103    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
     104    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
    101105    else                                  return 0;
    102106}
     
    114118    else if( type == KMEM_CPU_CTX )       return "KMEM_CPU_CTX";
    115119    else if( type == KMEM_FPU_CTX )       return "KMEM_FPU_CTX";
     120    else if( type == KMEM_BARRIER )       return "KMEM_BARRIER";
    116121
    117122    else if( type == KMEM_FATFS_INODE )   return "KMEM_FATFS_INODE";
    118123    else if( type == KMEM_FATFS_CTX )     return "KMEM_FATFS_CTX";
    119     else if( type == KMEM_RAMFS_INODE )   return "KMEM_RAMFS_INODE";
    120     else if( type == KMEM_RAMFS_CTX )     return "KMEM_RAMFS_CTX";
     124    else if( type == KMEM_DEVFS_INODE )   return "KMEM_DEVFS_INODE";
     125    else if( type == KMEM_MUTEX )         return "KMEM_MUTEX";
    121126    else if( type == KMEM_VFS_CTX )       return "KMEM_VFS_CTX";
    122127    else if( type == KMEM_VFS_INODE )     return "KMEM_VFS_INODE";
     
    124129    else if( type == KMEM_VFS_FILE )      return "KMEM_VFS_FILE";
    125130    else if( type == KMEM_SEM )           return "KMEM_SEM";
      131    else if( type == KMEM_CONDVAR )       return "KMEM_CONDVAR";
    126132    else                                  return "undefined";
    127133}
  • trunk/kernel/mm/kmem.h

    r18 r23  
    3131
    3232/*************************************************************************************
    33  * This enum defines the Kernel Memory Types for dynamically allocated objectsn.
     33 * This enum defines the Kernel Memory Types for dynamically allocated objects.
      34 * WARNING : this enum must be kept consistent with use in kmem.c file.
    3435 ************************************************************************************/
    3536
     
    4546  KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                                */
    4647  KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                                */
    47   KMEM_TBD_9            = 9,
     48  KMEM_BARRIER          = 9,   /*! remote_barrier_t                                 */
    4849
    4950  KMEM_FATFS_INODE      = 10,  /*! fatfs_inode_t                                    */
    5051  KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                      */
    51   KMEM_RAMFS_INODE      = 12,  /*  ramfs_inode_t                                    */
    52   KMEM_RAMFS_CTX        = 13,  /*! ramfs_ctx_t                                      */
      52  KMEM_DEVFS_INODE      = 12,  /*! devfs_inode_t                                    */
     53  KMEM_MUTEX            = 13,  /*! remote_mutex_t                                   */
    5354  KMEM_VFS_CTX          = 14,  /*! vfs_context_t                                    */
    5455  KMEM_VFS_INODE        = 15,  /*! vfs_inode_t                                      */
     
    5657  KMEM_VFS_FILE         = 17,  /*! vfs_file_t                                       */
    5758  KMEM_SEM              = 18,  /*! remote_sem_t                                     */
    58   KMEM_TBD_19           = 19,
     59  KMEM_CONDVAR          = 19,  /*! remote_condvar_t                                 */
    5960
    60   KMEM_TYPES_NR         = 20,
      61  KMEM_TYPES_NR         = 20,
    6162};
    6263
  • trunk/kernel/mm/mapper.c

    r18 r23  
    2626#include <hal_types.h>
    2727#include <hal_special.h>
     28#include <hal_uspace.h>
    2829#include <grdxt.h>
    2930#include <rwlock.h>
     
    108109        {
    109110            // remove page from mapper and release to PPM
    110                 error = mapper_release_page( mapper , found_index , page );
     111                error = mapper_release_page( mapper , page );
    111112
    112113            if ( error ) return error;
     
    148149
    149150    // test if page available in mapper
    150     if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available            /
     151    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    151152    {
    152153        // release the lock in READ_MODE and take it in WRITE_MODE
     
    194195                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
    195196                       __FUNCTION__ , this->trdid );
    196                 mapper_release_page( mapper , index , page );
     197                mapper_release_page( mapper , page );
    197198                page_clear_flag( page , PG_ALL );
    198199                req.ptr  = page;
     
    203204
    204205            // launch I/O operation to load page from file system
    205             error = mapper_updt_page( mapper , index , page );
     206            error = vfs_move_page_to_mapper( page );
    206207
    207208            if( error )
     
    209210                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
    210211                       __FUNCTION__ , this->trdid );
    211                 mapper_release_page( mapper , index , page );
     212                mapper_release_page( mapper , page );
    212213                page_clear_flag( page , PG_ALL );
    213214                req.ptr  = page;
     
    255256///////////////////////////////////////////////
    256257error_t mapper_release_page( mapper_t * mapper,
    257                              uint32_t   index,
    258258                             page_t   * page )
    259259{
     
    261261
    262262    // lauch IO operation to update page to file system
    263     error = mapper_sync_page( mapper , index , page );
     263    error = vfs_move_page_from_mapper( page );
    264264
    265265    if( error )
     
    288288}  // end mapper_release_page()
    289289
    290 ////////////////////////////////////////////
    291 error_t mapper_updt_page( mapper_t * mapper,
    292                           uint32_t   index,
    293                           page_t   * page )
    294 {
    295     uint32_t      type;
    296     vfs_inode_t * inode;
    297     error_t       error = 0;
    298 
    299     if( page == NULL )
    300     {
    301         printk("\n[PANIC] in %s : page pointer is NULL\n", __FUNCTION__ );
    302         hal_core_sleep();
    303     }
    304 
    305     if( mapper == NULL )
    306     {
    307         printk("\n[PANIC] in %s : no mapper for this page\n", __FUNCTION__ );
    308         hal_core_sleep();
    309     }
    310 
    311     // get file system type and inode pointer
    312     inode = mapper->inode;
    313     type  = inode->ctx->type;
    314 
    315     // get page lock
    316     page_lock( page );
    317 
    318     // get mapper lock in WRITE_MODE
    319     rwlock_wr_lock( &mapper->lock );
    320 
    321     // call proper I/O operation to update file system
    322     if     ( type == FS_TYPE_FATFS ) error = fatfs_read_page( page );
    323     else if( type == FS_TYPE_RAMFS ) error = ramfs_read_page( page );
    324     else
    325     {
    326         printk("\n[PANIC] in %s : undefined file system type\n", __FUNCTION__ );
    327         hal_core_sleep();
    328     }
    329 
    330     // release mapper lock from WRITE_MODE
    331     rwlock_wr_unlock( &mapper->lock );
    332 
    333     // release page lock
    334     page_unlock( page );
    335 
    336     if( error )
    337     {
    338         printk("\n[PANIC] in %s : cannot access file system\n", __FUNCTION__ );
    339         return EIO;
    340     }
    341 
    342     return 0;
    343 }  // end mapper_updt_page
    344 
    345 ////////////////////////////////////////////
    346 error_t mapper_sync_page( mapper_t * mapper,
    347                           uint32_t   index,
    348                           page_t   * page )
    349 {
    350     uint32_t      type;
    351     vfs_inode_t * inode;
    352     error_t       error = 0;
    353 
    354     if( page == NULL )
    355     {
    356         printk("\n[PANIC] in %s : page pointer is NULL\n", __FUNCTION__ );
    357         hal_core_sleep();
    358     }
    359 
    360     if( mapper == NULL )
    361     {
    362         printk("\n[PANIC] in %s : no mapper for this page\n", __FUNCTION__ );
    363         hal_core_sleep();
    364     }
    365 
    366         if( page_is_flag( page , PG_DIRTY ) )
    367         {
    368         // get file system type and inode pointer
    369         inode = mapper->inode;
    370         type  = inode->ctx->type;
    371 
    372         // get page lock
    373         page_lock( page );
    374 
    375         // get mapper lock in READ_MODE
    376         rwlock_rd_lock( &mapper->lock );
    377 
    378         // call proper I/O operation to update file system
    379         if     ( type == FS_TYPE_FATFS ) error = fatfs_write_page( page );
    380         else if( type == FS_TYPE_RAMFS ) error = ramfs_write_page( page );
    381         else
    382         {
    383             printk("\n[PANIC] in %s : undefined file system type\n", __FUNCTION__ );
    384             hal_core_sleep();
    385         }
    386 
    387         // release mapper lock from READ_MODE
    388         rwlock_rd_unlock( &mapper->lock );
    389 
    390         // release page lock
    391         page_unlock( page );
    392 
    393         if( error )
    394         {
    395             printk("\n[PANIC] in %s : cannot update file system\n", __FUNCTION__ );
    396             return EIO;
    397         }
    398 
    399         // clear dirty bit
    400                 page_undo_dirty( page );
    401      }
    402 
    403     return 0;
    404 
    405 }  // end mapper_sync_page()
    406 
    407 ///////////////////////////////////////////////////////////////////////////////////////
    408 // This static function is called by the mapper_move fragments() function.
    409 // It moves one fragment between an user buffer and the kernel mapper.
    410 // Implementation Note: It can require access to one or two pages in mapper:
    411 //  [max_page_index == min_page_index]     <=>  fragment fit in one mapper page
    412 //  [max_page index == min_page_index + 1] <=>  fragment spread on two mapper pages
    413 ///////////////////////////////////////////////////////////////////////////////////////
    414 static error_t mapper_move_one_fragment( mapper_t   * mapper,
    415                                          bool_t       to_buffer,
    416                                          fragment_t * fragment )
    417 {
    418     uint32_t   size;                 // number of bytes in fragment
    419     cxy_t      buf_cxy;              // cluster identifier for user buffer
    420     uint8_t  * buf_ptr;              // local pointer on first byte in user buffer
    421 
    422     xptr_t     xp_buf;               // extended pointer on byte in user buffer
    423     xptr_t     xp_map;               // extended pointer on byte in kernel mapper
    424 
    425     uint32_t   min_file_offset;      // offset of first byte in file
    426     uint32_t   max_file_offset;      // offset of last byte in file
    427 
    428     uint32_t   first_page_index;     // index of first page in mapper
    429     uint32_t   first_page_offset;    // offset of first byte in first page in mapper
    430     uint32_t   first_page_size;      // offset of first byte in first page in mapper
    431 
    432     uint32_t   second_page_index;    // index of last page in mapper
    433     uint32_t   second_page_offset;   // offset of last byte in last page in mapper
    434     uint32_t   second_page_size;     // offset of last byte in last page in mapper
    435 
    436     page_t   * page;                 // pointer on one page descriptor in mapper
    437     uint8_t  * map_ptr;              // local pointer on first byte in mapper
    438 
    439     // get fragment attributes in user buffer
    440     buf_cxy = fragment->buf_cxy;
    441     buf_ptr = fragment->buf_ptr;
    442     size    = fragment->size;
    443 
    444     if( size > CONFIG_PPM_PAGE_SIZE )
    445     {
    446         printk("\n[PANIC] in %s : illegal fragment size = %d\n",
    447                __FUNCTION__ , size );
    448         return EINVAL;
    449     }
     290/////////////////////////////////////////
     291error_t mapper_move( mapper_t  *  mapper,
     292                     bool_t       to_buffer,
     293                     uint32_t     file_offset,
     294                     void      *  buffer,
     295                     uint32_t     size )
     296{
     297    uint32_t   page_offset;    // first byte to move to/from a mapper page
     298    uint32_t   page_count;     // number of bytes to move to/from a mapper page
     299    uint32_t   index;          // current mapper page index
     300    uint32_t   done;           // number of moved bytes
     301    page_t   * page;           // current mapper page descriptor
     302    uint8_t  * map_ptr;        // current mapper  address
     303    uint8_t  * buf_ptr;        // current buffer  address
    450304
    451305    // compute offsets of first and last bytes in file
    452     min_file_offset = fragment->file_offset;
    453     max_file_offset = min_file_offset + size;
     306    uint32_t min_byte = file_offset;
     307    uint32_t max_byte = file_offset + size -1;
    454308
    455309    // compute indexes of pages for first and last byte in mapper
    456     first_page_index  = min_file_offset >> CONFIG_PPM_PAGE_SHIFT;
    457     second_page_index = max_file_offset >> CONFIG_PPM_PAGE_SHIFT;
    458 
    459     if ( first_page_index == second_page_index )  // only one page in mapper
    460     {
    461         // compute offset and size for page in mapper
    462         first_page_offset = min_file_offset & (1<<CONFIG_PPM_PAGE_SHIFT);
    463         first_page_size   = size;
    464 
    465         // get pointer on first page in mapper
    466         page = mapper_get_page( mapper , first_page_index );
     310    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
     311    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
     312
     313    done = 0;
     314
     315    // loop on pages in mapper
     316    for( index = first ; index <= last ; index++ )
     317    {
     318        // compute page_offset
     319        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
     320        else                 page_offset = 0;
     321
     322        // compute page_count
     323        if      ( first == last  ) page_count = size;
     324        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
     325        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
     326        else                       page_count = CONFIG_PPM_PAGE_SIZE;
     327
     328        // get page descriptor
     329        page = mapper_get_page( mapper , index );
    467330
    468331        if ( page == NULL ) return EINVAL;
    469332
    470         // compute pointer on fragment first byte in mapper
    471         map_ptr = (uint8_t *)ppm_page2base( page ) + first_page_offset;
    472 
    473         // compute extended pointers in mapper and in buffer
    474         xp_map = XPTR( local_cxy , map_ptr );
    475         xp_buf = XPTR( buf_cxy , buf_ptr );
     333        // compute pointer in mapper
     334        map_ptr = (uint8_t *)ppm_page2base( page ) + page_offset;
     335
     336        // compute pointer in buffer
     337        buf_ptr = (uint8_t *)buffer + done;
    476338
    477339        // move fragment
    478340        if( to_buffer )
    479341        {
    480             hal_remote_memcpy( xp_buf , xp_map , first_page_size );
     342            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
    481343        }
    482344        else
    483345        {
    484346            page_do_dirty( page );
    485             hal_remote_memcpy( xp_map , xp_buf , first_page_size );
    486         }
    487     }
    488     else                                        // two pages in mapper
    489     {
    490         // compute offset and size for first page in mapper
    491         first_page_offset = min_file_offset & (1<<CONFIG_PPM_PAGE_SHIFT);
    492         first_page_size   = CONFIG_PPM_PAGE_SIZE - first_page_offset;
    493 
    494         // get pointer on first page descriptor in mapper
    495         page = mapper_get_page( mapper , first_page_index );
    496 
    497         if ( page == NULL ) return EINVAL;
    498 
    499         // compute local pointer on first byte in first page in mapper
    500         map_ptr = (uint8_t *)ppm_page2base(page) + first_page_offset;
    501 
    502         // compute extended pointers
    503         xp_map = XPTR( local_cxy , map_ptr );
    504         xp_buf = XPTR( buf_cxy , buf_ptr );
    505 
    506         // move fragment to/from first page
    507         if( to_buffer )
    508         {
    509             hal_remote_memcpy( xp_buf , xp_map , first_page_size );
    510         }
    511         else
    512         {
    513             page_do_dirty( page );
    514             hal_remote_memcpy( xp_map , xp_buf , first_page_size );
    515         }
    516 
    517         // compute offset and size for second page in mapper
    518         second_page_offset = 0;
    519         second_page_size   = size - first_page_size;
    520 
    521         // get pointer on second page in mapper
    522         page = mapper_get_page( mapper , second_page_index );
    523 
    524         if ( page == NULL ) return EINVAL;
    525 
    526         // compute local pointer on first byte in second page in mapper
    527         map_ptr = (uint8_t *)ppm_page2base( page ) + second_page_offset;
    528 
    529         // compute extended pointers
    530         xp_map = XPTR( local_cxy , map_ptr );
    531         xp_buf = XPTR( buf_cxy , buf_ptr + first_page_offset );
    532 
    533         // move fragment to/from second page
    534         if( to_buffer )
    535         {
    536             hal_remote_memcpy( xp_buf , xp_map , second_page_size );
    537         }
    538         else
    539         {
    540             page_do_dirty( page );
    541             hal_remote_memcpy( xp_map , xp_buf , second_page_size );
    542         }
     347            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
     348        }
     349
     350        done += page_count;
    543351    }
    544352
    545353    return 0;
    546 }  // end mapper_move_one_fragment()
    547 
    548 /////////////////////////////////////////////////
    549 error_t mapper_move_fragments( mapper_t * mapper,
    550                                bool_t     read,
    551                                uint32_t   nb_frags,
    552                                xptr_t     xp_frags )
    553 {
    554         uint32_t     index;
    555         error_t      error;
    556     fragment_t   local_frags[CONFIG_MAPPER_MAX_FRAGMENTS];   // local copy of fragments array
    557     fragment_t * frags_array;                                // pointer on fragments array
    558 
    559     // check nb_frags
    560     if( nb_frags > CONFIG_MAPPER_MAX_FRAGMENTS )
    561     {
    562         printk("\n[PANIC] in %s : number of fragments cannot be larger than %d\n",
    563                __FUNCTION__ , CONFIG_MAPPER_MAX_FRAGMENTS );
    564         return EINVAL;
    565     }
    566 
    567     // get client cluster and local pointer on fragments array
    568     cxy_t        client_cxy   = GET_CXY( xp_frags );
    569     fragment_t * client_frags = (fragment_t *)GET_PTR( xp_frags );
    570 
    571     if ( local_cxy == client_cxy ) // use the local fragments array if possible
    572     {
    573         frags_array = client_frags;
    574     }
    575     else                           // make a local copy of fragments array
    576     {
    577         hal_remote_memcpy( XPTR( local_cxy , local_frags ) , xp_frags ,
    578                            sizeof(fragment_t) * nb_frags );
    579         frags_array = local_frags;
    580     }
    581 
    582     // loop on fragments
    583     for( index = 0 ; index < nb_frags ; index ++ )
    584     {
    585         error = mapper_move_one_fragment( mapper , read , &frags_array[index] );
    586         if ( error ) return error;
    587     }
    588 
    589     return 0;
    590 
    591 }  // end mapper_move_fragments()
    592 
    593 
     354
     355}  // end mapper_move()
     356
     357
     358
  • trunk/kernel/mm/mapper.h

    r18 r23  
    3939/*******************************************************************************************
    4040 * The mapper implements the kernel cache for a given file or directory.
    41  * There is one mapper per file. It is implemented as a three levels radix tree,
     41 * There is one mapper per file/dir. It is implemented as a three levels radix tree,
    4242 * entirely stored in the same cluster as the inode representing the file/dir.
    4343 * - The fast retrieval key is the page index in the file.
     
    4949 * - The mapper is protected by a blocking "rwlock", to support several simultaneous
    5050 *   readers, and only one writer. This lock implement a busy waiting policy.
    51  * - The two functions mapper_sync_page() and mapper_updt_page() define the generic API
    52  *   used to move pages to or from the relevant file system on IOC device.
    53  * - the mapper_move fragments() function is used to move data to or from a distributed
    54  *   user buffer.
     51 * - The two functions vfs_move_page_to_mapper() and vfs_move_page_from_mapper() define
     52 *   the generic API used to move pages to or from the relevant file system on IOC device.
     53 * - the mapper_move() function is used to move data to or from a, possibly distributed
     54 *   user buffer in user space.
    5555 * - The mapper_get_page() function that return a page descriptor pointer from a page
    5656 *   index in file is in charge of handling the miss on the mapper cache.
    5757 * - In the present implementation the cache size increases on demand, and the
    58  *   allocated memory is only released when the mapper is destroyed.
     58 *   allocated memory is only released when the mapper/inode is destroyed.
    5959 ******************************************************************************************/
    6060
     
    6666typedef struct mapper_s
    6767{
    68         struct vfs_inode_s * inode;           /*! owner file inode                                */
     68        struct vfs_inode_s * inode;           /*! owner inode                                     */
    6969        grdxt_t              radix;           /*! pages cache implemented as a radix tree         */
    7070        rwlock_t             lock;        /*! several readers / only one writer               */
     
    8989    uint32_t    size;                /*! number of bytes in fragment                      */
    9090    cxy_t       buf_cxy;             /*! user buffer cluster identifier                   */
    91     uint8_t   * buf_ptr;             /*! local pointer on first byte in user buffer       */
     91    void      * buf_ptr;             /*! local pointer on first byte in user buffer       */
    9292}
    9393fragment_t;
     
    114114
    115115/*******************************************************************************************
    116  * This function moves all fragments covering a distributed user buffer between
    117  * a mapper (associated to a local inode), and the user buffer.
    118  * [See the fragment definition in the mapper.h file]
    119  * It must be executed by a thread running in the cluster containing the mapper.
    120  * The lock protecting the mapper must have been taken in WRITE_MODE or READ_MODE
    121  * by the caller thread, depending on the transfer direction.
      116 * This function moves data between a kernel mapper and a user buffer.
     117 * It must be called by a thread running in the cluster containing the mapper.
      118 * It splits the data in fragments : one fragment is a set of contiguous bytes
     119 * stored in the same mapper page. 
     120 * It uses "hal_uspace" accesses to move fragments to/from the user buffer.
    122121 * In case of write, the dirty bit is set for all pages written in the mapper.
    123122 * The offset in the file descriptor is not modified by this function.
    124  * Implementation note:
    125  * For each fragment, this function makes ONE hal_remote_memcpy() when the fragment is
    126  * fully contained in one single page of the mapper. It makes TWO hal_remote_memcpy()
    127  * if the fragment spread on two contiguous pages in the mapper.
    128123 *******************************************************************************************
    129  * @ mapper    : local pointer on the local mapper.
    130  * @ to_buffer : mapper to buffer if true / buffer to mapper if false.
    131  * @ nb_frags  : number of fragments covering the user buffer (one per page).
    132  * @ frags_xp  : extended pointer on array of fragments.
    133 FAT * returns O if success / returns EINVAL if error.
     124 * @ mapper       : extended pointer on local mapper.
     125 * @ to_buffer    : move data from mapper to buffer if true.
     126 * @ file_offset  : first byte to move in file.
     127 * @ buffer       : buffer address in user space.
     128 * @ size         : number of bytes to move.
      129 * returns 0 if success / returns EINVAL if error.
    134130 ******************************************************************************************/
    135 error_t mapper_move_fragments( mapper_t * mapper,
    136                                bool_t     to_buffer,
    137                                uint32_t   nb_frags,
    138                                xptr_t     frags_xp );
    139 
     131error_t mapper_move( mapper_t * mapper,
     132                     bool_t     to_buffer,
     133                     uint32_t   file_offset,
     134                     void     * buffer,
     135                     uint32_t   size );
    140136
    141137/*******************************************************************************************
     
    146142 *******************************************************************************************
    147143 * @ mapper     : local pointer on the mapper.
    148  * @ index      : page index in file
    149144 * @ page       : pointer on page to remove.
    150145 * @ return 0 if success / return EIO if a dirty page cannot be copied to FS.
    151146 ******************************************************************************************/
    152147error_t mapper_release_page( mapper_t      * mapper,
    153                              uint32_t        index,
    154148                             struct page_s * page );
    155149
     
    167161                                 uint32_t   index );
    168162
    169 /*******************************************************************************************
    170  * This function makes an I/O operation to move one page from FS to mapper.
    171  * Depending on the file system type, it calls the proper, FS specific function.
    172  * It must be executed by a thread running in the cluster containing the mapper.
    173  *******************************************************************************************
    174  * @ mapper     : local pointer on the mapper.
    175  * @ index      : page index in file.
    176  * @ page   : local pointer on the page descriptor in mapper.
    177  * @ returns 0 if success / return EINVAL if it cannot access the device.
    178  ******************************************************************************************/
    179 error_t mapper_updt_page( mapper_t      * mapper,
    180                           uint32_t        index,
    181                           struct page_s * page );
    182 
    183 /*******************************************************************************************
    184  * This function makes an I/0 operation to move one page from mapper to FS.
    185  * Depending on the file system type, it calls the proper, FS specific function.
    186  * It must be executed by a thread running in the cluster containing the mapper.
    187  * It does nothing if the page is not dirty. If the page is dirty, it takes
    188  * the page lock before launching the IO operation, clear the page dirty bit,
    189  * and remove the page from the PPM dirty list. It does nothing if the page is not dirty.
    190  *******************************************************************************************
    191  * @ mapper     : local pointer on the mapper.
    192  * @ index      : page index in file.
    193  * @ page   : local pointer on the page descriptor in mapper.
    194  * @ returns 0 if success / return EINVAL if it cannot access the device.
    195  ******************************************************************************************/
    196 error_t mapper_sync_page( mapper_t      * mapper,
    197                           uint32_t        index,
    198                           struct page_s * page );
     163 
    199164
    200165#endif /* _MAPPER_H_ */
  • trunk/kernel/mm/page.c

    r22 r23  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     5 *          Alain Greiner    (2016,2017)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4444        page->flags    = 0;
    4545        page->order    = 0;
     46        page->mapper   = NULL;
    4647        page->index    = 0;
    47         page->mapper   = NULL;
    48         page->private  = 0;
     48        page->fork_nr  = 0;
    4949        page->refcount = 0;
     50
    5051        spinlock_init( &page->lock );
    5152        list_entry_init( &page->list );
     53    xlist_root_init( XPTR( local_cxy , &page->wait_root ) );
    5254}
    5355
    5456////////////////////////////////////////////
    5557inline void page_set_flag( page_t   * page,
    56                            uint16_t   value )
     58                           uint32_t   value )
    5759{
    5860        hal_atomic_or( (uint32_t *)&page->flags , (uint32_t)value );
     
    6163//////////////////////////////////////////////
    6264inline void page_clear_flag( page_t   * page,
    63                              uint16_t   value )
     65                             uint32_t   value )
    6466{
    6567        hal_atomic_and( (uint32_t *)&page->flags , ~((uint32_t)value) );
     
    6870//////////////////////////////////////////////
    6971inline bool_t page_is_flag( page_t   * page,
    70                             uint16_t   value )
    71 {
    72         return (bool_t)(page->flags & value);
     72                            uint32_t   value )
     73{
     74    return ( (page->flags & value) ? 1 : 0 );
    7375}
    7476
     
    150152
    151153                // sync the page
    152                 mapper_sync_page( mapper , index , page );
     154                vfs_move_page_from_mapper( page );
    153155
    154156                // unlock the page
     
    226228inline void page_refcount_up( page_t *page )
    227229{
    228         hal_atomic_inc( &page->refcount );
     230    hal_atomic_add( &page->refcount , +1 );
    229231}
    230232
     
    232234inline void page_refcount_down( page_t *page )
    233235{
    234         hal_atomic_dec( &page->refcount );
     236    hal_atomic_add( &page->refcount , -1 );
    235237}
    236238
  • trunk/kernel/mm/page.h

    r22 r23  
    4949#define PG_DIRTY            0x0040     // page has been written
    5050#define PG_LOCKED       0x0080     // page is locked
     51#define PG_COW          0x0100     // page is copy-on-write
    5152
    5253#define PG_ALL          0xFFFF     // All flags
     
    5455/*************************************************************************************
    5556 * This structure defines a physical page descriptor.
    56  * Size is 60 bytes for a 32 bits core...
     57 * Size is 64 bytes for a 32 bits core...
    5758 ************************************************************************************/
    5859
    5960typedef struct page_s
    6061{
    61     uint16_t          flags;          /*! flags defined above                  (2)  */
    62     uint16_t          order;          /*! log2( number of 4Kbytes pages)       (2)  */
    63 
     62    uint32_t          flags;          /*! flags defined above                  (4)  */
     63    uint32_t          order;          /*! log2( number of 4Kbytes pages)       (4)  */
    6464    struct mapper_s * mapper;         /*! local pointer on associated mapper   (4)  */
    6565    uint32_t          index;          /*! page index in mapper                 (4)  */
    66 
    67         union                             /*!                                      (4)  */
    68         {
    69                 uint32_t      private;        /*! TODO ??? [AG]                             */
    70                 void        * data;           /*! TODO ??? [AG]                             */
    71                 slist_entry_t root;           /*! TODO ??? [AG]                             */
    72         };
    73 
    7466        list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */
    75 
    7667    xlist_entry_t     wait_root;      /*! root of list of waiting threads      (16) */
    77 
    7868        uint32_t          refcount;       /*! reference counter                    (4)  */
     69        uint32_t          fork_nr;        /*! number of forked processes           (4)  */
    7970        spinlock_t        lock;           /*! only used to set the PG_LOCKED flag  (16) */
    8071}
     
    8374/*************************************************************************************
    8475 * This function initializes one page descriptor.
     76 *************************************************************************************
    8577 * @ page    : pointer to page descriptor
    8678 ************************************************************************************/
     
    8880
    8981/*************************************************************************************
    90  * This function sets one or several flags in page descriptor flags.
     82 * This function atomically set one or several flags in page descriptor flags.
     83 *************************************************************************************
    9184 * @ page    : pointer to page descriptor.
    9285 * @ value   : all non zero bits in value will be set.
    9386 ************************************************************************************/
    9487inline void page_set_flag( page_t   * page,
    95                            uint16_t   value );
     88                           uint32_t   value );
    9689
    9790/*************************************************************************************
    98  * This function clears one or several flags in page descriptor flags.
     91 * This function atomically reset one or several flags in page descriptor flags.
     92 *************************************************************************************
    9993 * @ page    : pointer to page descriptor.
    10094 * @ value   : all non zero bits in value will be cleared.
    10195 ************************************************************************************/
    10296inline void page_clear_flag( page_t   * page,
    103                              uint16_t   value );
     97                             uint32_t   value );
    10498
    10599/*************************************************************************************
    106100 * This function tests the value of one or several flags in page descriptor flags.
     101 *************************************************************************************
    107102 * @ page    : pointer to page descriptor.
    108103 * @ value   : all non zero bits will be tested.
     
    110105 ************************************************************************************/
    111106inline bool_t page_is_flag( page_t   * page,
    112                             uint16_t   value );
     107                            uint32_t   value );
    113108
    114109/*************************************************************************************
     
    121116 * This function sets the PG_DIRTY flag in the page descriptor,
    122117 * and registers the page in the dirty list in PPM.
     118 *************************************************************************************
    123119 * @ page     : pointer on page descriptor.
    124120 * @ returns true if page was not dirty / returns false if page was dirty
     
    129125 * This function resets the PG_DIRTY flag in the page descriptor,
    130126 * and removes the page from the dirty list in PPM.
     127 *************************************************************************************
    131128 * @ page     : pointer on page descriptor.
    132129 * @ returns true if page was dirty / returns false if page was not dirty
     
    136133/*************************************************************************************
    137134 * This function makes a local copy of the content of a src page to a dst page.
     135 *************************************************************************************
    138136 * @ dst      : pointer on destination page descriptor.
    139137 * @ src      : pointer on source page descriptor.
     
    144142/*************************************************************************************
    145143 * This function resets to 0 all bytes in a given page.
     144 *************************************************************************************
    146145 * @ page     : pointer on page descriptor.
    147146 ************************************************************************************/
     
    152151 * It deschedule if the page has already been locked by another thread,
    153152 * and returns only when the flag has been successfully set.
     153 *************************************************************************************
    154154 * @ page     : pointer on page descriptor.
    155155 ************************************************************************************/
     
    160160 * other waiting thread. If there is waiting thread(s), it activates the first
    161161 * waiting thread without modifying the PG_LOCKED flag.
     162 *************************************************************************************
    162163 * @ page     : pointer on page descriptor.
    163164 ************************************************************************************/
     
    166167/*************************************************************************************
    167168 * This blocking function atomically increments the page refcount.
     169 *************************************************************************************
    168170 * @ page     : pointer on page descriptor.
    169171 ************************************************************************************/
     
    172174/*************************************************************************************
    173175 * This blocking function atomically decrements the page refcount.
     176 *************************************************************************************
    174177 * @ page     : pointer on page descriptor.
    175178 ************************************************************************************/
     
    178181/*************************************************************************************
    179182 * This function display the values contained in a page descriptor.
     183 *************************************************************************************
     184 * @ page     : pointer on page descriptor.
    180185 ************************************************************************************/
    181186void page_print( page_t * page );
  • trunk/kernel/mm/vmm.c

    r21 r23  
    2929#include <hal_gpt.h>
    3030#include <printk.h>
     31#include <memcpy.h>
    3132#include <rwlock.h>
    3233#include <list.h>
     
    9495
    9596    // initialize local list of vsegs and radix-tree
     97    vmm->vsegs_nr = 0;
    9698        list_root_init( &vmm->vsegs_root );
    97     vmm->vsegs_nr = 0;
    9899    error = grdxt_init( &vmm->grdxt,
    99100                        CONFIG_VMM_GRDXT_W1,
     
    182183
    183184}  // end vmm_init()
     185
     186//////////////////////////////////////////
     187error_t vmm_copy( process_t * dst_process,
     188                  process_t * src_process )
     189{
     190    error_t error;
     191
     192    vmm_t * src_vmm = &src_process->vmm;
     193    vmm_t * dst_vmm = &dst_process->vmm;
     194
     195    // take the src_vmm vsegs_lock
     196    rwlock_wr_lock( &src_vmm->vsegs_lock );
     197
     198    // initialise dst_vmm vsegs_lock
     199    rwlock_init( &dst_vmm->vsegs_lock );
     200
     201    // initialise the dst_vmm vsegs list and the radix tree
     202    dst_vmm->vsegs_nr = 0;
     203    list_root_init( &dst_vmm->vsegs_root );
     204    error = grdxt_init( &dst_vmm->grdxt,
     205                        CONFIG_VMM_GRDXT_W1,
     206                        CONFIG_VMM_GRDXT_W2,
     207                        CONFIG_VMM_GRDXT_W3 );
     208    if( error )
     209    {
     210        printk("\n[ERROR] in %s : cannot initialise radix tree for process %x\n",
     211               __FUNCTION__ , dst_process->pid );
     212        return ENOMEM;
     213    }
     214
     215    // loop on src_vmm list of vsegs to create
     216    // and register vsegs copies in dst_vmm
     217    list_entry_t * iter;
     218    vseg_t       * src_vseg;
     219    vseg_t       * dst_vseg;
     220    LIST_FOREACH( &src_vmm->vsegs_root , iter )
     221    {
     222        // get pointer on current src_vseg
     223        src_vseg = LIST_ELEMENT( iter , vseg_t , list );
     224
     225        // allocate memory for a new dst_vseg
     226        dst_vseg = vseg_alloc();
     227
     228        if( dst_vseg == NULL )
     229        {
     230            // release all allocated vsegs
     231            LIST_FOREACH( &dst_vmm->vsegs_root , iter )
     232            {
     233                dst_vseg = LIST_ELEMENT( iter , vseg_t , list );
     234                vseg_free( dst_vseg );
     235            }
     236            return ENOMEM;
     237        }
     238
     239        // copy src_vseg to dst_vseg
     240        vseg_init_from_ref( dst_vseg , XPTR( local_cxy , src_vseg ) );
     241
     242        // register dst_vseg in dst_vmm
     243        vseg_attach( dst_vmm , dst_vseg );
     244    }
     245
     246    // release the src_vmm vsegs_lock
     247    rwlock_wr_unlock( &src_vmm->vsegs_lock );
     248
     249    // initialize generic page table
     250    error = hal_gpt_create( &dst_vmm->gpt );
     251
     252    if( error )
     253    {
     254        printk("\n[ERROR] in %s : cannot initialize page table\n", __FUNCTION__ );
     255        return ENOMEM;
     256    }
     257
     258    // initialize STACK allocator
     259    dst_vmm->stack_mgr.bitmap   = 0;
     260    dst_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
     261
     262    // initialize MMAP allocator
     263    dst_vmm->mmap_mgr.vpn_base        = CONFIG_VMM_MMAP_BASE;
     264    dst_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_MMAP_BASE;
     265    dst_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_MMAP_BASE;
     266    uint32_t i;
     267    for( i = 0 ; i < 32 ; i++ ) list_root_init( &dst_vmm->mmap_mgr.zombi_list[i] );
     268
     269    // initialise instrumentation counters
     270        dst_vmm->pgfault_nr    = 0;
     271        dst_vmm->u_err_nr      = 0;
     272        dst_vmm->m_err_nr      = 0;
     273
     274    // copy base addresses
     275    dst_vmm->kent_vpn_base = src_vmm->kent_vpn_base;
     276    dst_vmm->args_vpn_base = src_vmm->args_vpn_base;
     277    dst_vmm->envs_vpn_base = src_vmm->envs_vpn_base;
     278    dst_vmm->heap_vpn_base = src_vmm->heap_vpn_base;
     279    dst_vmm->code_vpn_base = src_vmm->code_vpn_base;
     280    dst_vmm->data_vpn_base = src_vmm->data_vpn_base;
     281
     282    dst_vmm->entry_point   = src_vmm->entry_point;
     283
     284    // HEAP TODO : new heap for child ???
     285    dst_vmm->heap_vseg     = src_vmm->heap_vseg;
     286
     287    // initialize generic page table
     288    error = hal_gpt_create( &dst_vmm->gpt );
     289
     290    if( error )
     291    {
     292        printk("\n[ERROR] in %s : cannot initialize page table\n", __FUNCTION__ );
     293        return ENOMEM;
     294    }
     295
     296    // copy GPT content from src_vmm to dst_vmm, activating "Copy-On-Write"
     297    // TODO register Copy-On_Write in page descriptors
     298    bool_t cow = true;
     299    hal_gpt_copy( &dst_vmm->gpt , &src_vmm->gpt , cow );
     300
     301    hal_wbflush();
     302
     303    return 0;
     304
     305}  // end vmm_copy()
    184306
    185307///////////////////////////////////////
     
    659781    error_t   error;
    660782
    661     // this function must be called by in the reference cluster
    662     if( process->is_ref == false );
     783    // this function must be called by a thread running in the reference cluster
     784    if( GET_CXY( process->ref_xp ) != local_cxy );
    663785    {
    664786        printk("\n[PANIC] in %s : not called in the reference cluster\n", __FUNCTION__ );
     
    796918                           paddr_t * paddr )
    797919{
    798     uint32_t vaddr = (uint32_t)ptr;
    799 
    800     thread_t  * this    = CURRENT_THREAD;
    801     process_t * process = this->process;
     920    process_t * process = CURRENT_THREAD->process;
    802921
    803922    if( ident )  // identity mapping
    804923    {
    805         *paddr = (paddr_t)PADDR( local_cxy , vaddr );
     924        *paddr = (paddr_t)PADDR( local_cxy , (lpa_t)ptr );
    806925        return 0;
    807926    }
     
    814933    uint32_t offset;
    815934
    816     vpn    = (vpn_t)( vaddr >> CONFIG_PPM_PAGE_SHIFT );
    817     offset = (uint32_t)( vaddr & CONFIG_PPM_PAGE_MASK );
    818 
    819     if( process->is_ref )   // calling process is reference process
     935    vpn    = (vpn_t)( (intptr_t)ptr >> CONFIG_PPM_PAGE_SHIFT );
     936    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
     937
     938    if( local_cxy == GET_CXY( process->ref_xp) )   // calling process is reference process
    820939    {
    821940        error = vmm_get_pte( process, vpn , &attr , &ppn );
    822941    }
    823     else                    // use a RPC
     942    else                                           // use a RPC
    824943    {
    825944        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
     
    828947    }
    829948
    830     if( error )
    831     {
    832         printk("\n[ERROR] in %s : cannot get physical address for vaddr = %x\n",
    833                __FUNCTION__ , vaddr );
    834         return error;
    835     }
    836 
    837     // return paddr
     949    // set paddr
    838950    *paddr = (((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT) | offset;
    839     return 0;
     951
     952    return error;
    840953
    841954}  // end vmm_v2p_translate()
     955
     956//////////////////////////////////////////////
    842957
    843958
  • trunk/kernel/mm/vmm.h

    r18 r23  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016)
     6 *           Alain Greiner (2016,2017)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    103103        rwlock_t       vsegs_lock;         /*! lock protecting the vsegs list & radix tree      */
    104104        list_entry_t   vsegs_root;         /*! all vsegs in same process and same cluster       */
    105         uint32_t       vsegs_nr;           /*! total number of vsegs                            */
    106 
     105        uint32_t       vsegs_nr;           /*! total number of local vsegs                      */
    107106        grdxt_t        grdxt;              /*! embedded generic vsegs radix tree (key is vpn)   */
    108107
     
    110109
    111110    stack_mgr_t    stack_mgr;          /*! embedded STACK vsegs allocator                   */
    112 
    113111    mmap_mgr_t     mmap_mgr;           /*! embedded MMAP vsegs allocator                    */
    114112
     
    158156
    159157/*********************************************************************************************
     158 * This function copies the content of a source VMM to a destination VMM.
     159 *********************************************************************************************
     160 * @ dst_process   : pointer on destination process descriptor.
     161 * @ src_process   : pointer on source process descriptor.
     162 * @ return 0 if success / return ENOMEM if failure.
     163 ********************************************************************************************/
     164error_t vmm_copy( struct process_s * dst_process,
     165                  struct process_s * src_process );
     166
     167/*********************************************************************************************
    160168 * This function removes all vsegs registered in in a virtual memory manager,
    161169 * and releases the memory allocated to the local generic page table.
    162170 *********************************************************************************************
    163  * @ vmm   : pointer on process descriptor.
     171 * @ process   : pointer on process descriptor.
    164172 ********************************************************************************************/
    165173void vmm_destroy( struct process_s * process );
     
    265273
    266274/*********************************************************************************************
    267  * This function is called by the architecture specific exception handler when a
    268  * page fault has been detected in a given cluster.
     275 * This function is called by the generic exception handler when a page fault
     276 * has been detected in a given cluster.
    269277 * If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
    270278 * to the reference cluster to get the missing PTE attributes and PPN, and update
     
    305313 * This function makes the virtual to physical address translation, using the calling
    306314 * process page table. It uses identity mapping if required by the ident flag.
    307  * This address translation is required to configure the devices
    308  * that have a DMA capability, or to implement the software L2/L3 cache cohérence,
    309  * using the MMC device synchronisation primitives.
    310  * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENT parameter.
     315 * This address translation is required to configure the peripherals having a DMA
     316 * capability, or to implement the software L2/L3 cache cohérence, using the MMC device
     317 * synchronisation primitives.
     318 * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENTITY_MAP parameter.
    311319 *********************************************************************************************
    312320 * @ ident     : [in] uses identity mapping if true.
     
    321329
    322330/*********************************************************************************************
    323  * Pas a sa place ici [AG]
    324  ********************************************************************************************/
    325 int sys_mmap( mmap_attr_t * mattr );
    326 
    327 /*********************************************************************************************
    328331 ********************************************************************************************/
    329332int sys_madvise( void    * start,
Note: See TracChangeset for help on using the changeset viewer.