Changeset 619 for trunk/kernel/mm


Timestamp:
Feb 12, 2019, 1:15:47 PM
Author:
alain
Message:

1) Fix a bug in KSH: after the "load" command, the [ksh] prompt is now
   printed after completion of the loaded application.

2) Fix a bug in vmm_handle_cow(): the copy-on-write now uses
   hal_remote_memcpy() to replicate the page content.
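
For context on fix 2: the previous code built local base pointers with GET_PTR() and
called a plain memcpy(), which is only valid when both physical pages are mapped in the
local cluster; the fix passes the extended pointers directly to hal_remote_memcpy(),
which also handles remote pages. A minimal before/after sketch, reconstructed from the
vmm.c hunk shown below:

    // before (r617) : local copy, incorrect when a page belongs to a remote cluster
    xptr_t  old_base_xp = ppm_ppn2base( old_ppn );
    xptr_t  new_base_xp = ppm_ppn2base( new_ppn );
    memcpy( GET_PTR( new_base_xp ), GET_PTR( old_base_xp ), CONFIG_PPM_PAGE_SIZE );

    // after (r619) : the extended pointers returned by ppm_ppn2base() carry the
    // cluster identifier, so hal_remote_memcpy() can replicate a remote page
    hal_remote_memcpy( ppm_ppn2base( new_ppn ),
                       ppm_ppn2base( old_ppn ),
                       CONFIG_PPM_PAGE_SIZE );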


Location: trunk/kernel/mm
Files: 6 edited

Legend: unchanged context lines are shown as-is; lines prefixed with "-" were removed,
and lines prefixed with "+" were added.
  • trunk/kernel/mm/kcm.c

    (r567 → r619)

      *
      * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    - *         Alain Greiner    (2016,2017,2018)
    + *         Alain Greiner    (2016,2017,2018,2019)
      *
      * Copyright (c) UPMC Sorbonne Universites
     
      
      #if DEBUG_KCM
    + thread_t * this = CURRENT_THREAD;
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_KCM < cycle )
    - printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
    - __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
    + printk("\n[%s] thread[%x,%x] enters for %s / page %x / count %d / active %d\n",
    + __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type),
      (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
      #endif
      
    -         assert( kcm_page->active , "kcm_page should be active" );
    + assert( kcm_page->active , "kcm_page should be active" );
      
              // get first block available
              int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );
      
    -         assert( (index != -1) , "kcm_page should not be full" );
    + assert( (index != -1) , "kcm_page should not be full" );
      
              // allocate block
     
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_KCM < cycle )
    - printk("\n[DBG] %s : thread %x exit / type %s / ptr %x / page %x / count %d\n",
    -        __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , (intptr_t)ptr ,
    - (intptr_t)kcm_page , kcm_page->count );
    + printk("\n[%s] thread[%x,%x] exit for %s / ptr %x / page %x / count %d\n",
    + __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type),
    + (intptr_t)ptr, (intptr_t)kcm_page, kcm_page->count );
      #endif
      
     
              index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;
      
    -         assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" );
    -         assert( (kcm_page->count > 0) , "count already zero" );
    + assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" );
    + 
    + assert( (kcm_page->count > 0) , "count already zero" );
      
              bitmap_set( kcm_page->bitmap , index );
     
              if( page == NULL )
              {
    -                 printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n",
    -                        __FUNCTION__ , local_cxy );
    +                 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    +             __FUNCTION__ , local_cxy );
                      return ENOMEM;
              }
     
                         uint32_t   type )
      {
    -         // the kcm_page descriptor mut fit in the KCM slot
    -         assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) ,
    -                 "KCM slot too small\n" );
    + 
    + // the kcm_page descriptor must fit in the KCM slot
    + assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , "KCM slot too small\n" );
    + 
    + // the allocated object must fit in one single page
    + assert( (kmem_type_size(type) <= (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE)),
    + "allocated object requires more than one single page\n" );
      
              // initialize lock
     
              uint32_t  blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
              kcm->blocks_nr = blocks_nr;
    + 
    + #if DEBUG_KCM
    + thread_t * this  = CURRENT_THREAD;
    + uint32_t   cycle = (uint32_t)hal_get_cycles();
    + if( DEBUG_KCM < cycle )
    + printk("\n[%s] thread[%x,%x] initialised KCM %s : block_size %d / blocks_nr %d\n",
    + __FUNCTION__, this->process->pid, this->trdid,
    + kmem_type_str( kcm->type ), block_size, blocks_nr );
    + #endif
    + 
      }
      
     
              kcm_t      * kcm;
      
    -         assert( (ptr != NULL) , "pointer cannot be NULL" );
    + // check argument
    + assert( (ptr != NULL) , "pointer cannot be NULL" );
      
              kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
  • trunk/kernel/mm/kcm.h

    (r567 → r619)

       * for fixed size objects. It exists a specific KCM allocator for each object type.
       * The actual allocated block size is the smallest multiple of the KCM slot, that
    -  * contain one single object. The KCM slot is typically 64 bytes, as it must be large
    -  * enough to store the kcm_page descriptor, defined below.
    +  * contain one single object. The KCM slot is 64 bytes, as it must be large enough
    +  * to store the kcm_page descriptor, defined below.
       * The various KCM allocators themselves are not statically allocated in the cluster
       * manager, but are dynamically allocated when required, using the embedded KCM
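
    To make the block-size rule above concrete, here is a small user-space sketch of the
    rounding and of the blocks_nr formula used in kcm_init() (see the kcm.c hunk above);
    the kcm_block_size() helper and the numeric values are illustrative assumptions, not
    part of the changeset:

        #include <stdint.h>
        #include <stdio.h>

        #define CONFIG_KCM_SLOT_SIZE  64     /* assumed KCM slot size (bytes) */
        #define CONFIG_PPM_PAGE_SIZE  4096   /* assumed physical page size    */

        /* hypothetical helper : smallest multiple of the KCM slot containing one object */
        static uint32_t kcm_block_size( uint32_t object_size )
        {
            uint32_t slots = (object_size + CONFIG_KCM_SLOT_SIZE - 1) / CONFIG_KCM_SLOT_SIZE;
            return slots * CONFIG_KCM_SLOT_SIZE;
        }

        int main( void )
        {
            uint32_t object_size = 200;                            /* example object */
            uint32_t block_size  = kcm_block_size( object_size );  /* -> 256 bytes   */

            /* same formula as in kcm_init() : one slot is reserved for the kcm_page
               descriptor, the remainder of the page is divided into blocks */
            uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;

            printf( "block_size = %u / blocks_nr = %u\n",
                    (unsigned)block_size, (unsigned)blocks_nr );   /* 256 / 15 */
            return 0;
        }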
  • trunk/kernel/mm/khm.h

    (r567 → r619)

       *
       * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    -  *          Alain Greiner (2016,2017,2018)
    +  *          Alain Greiner (2016,2017,2018,2019)
       *
       * Copyright (c) UPMC Sorbonne Universites
     
      /*******************************************************************************************
       * This structure defines a Kernel Heap Manager (KHM) in a given cluster.
    -  * It is used to allocate memory objects, that are not enough replicated to justify
    -  * a dedicated KCM allocator.
    +  * It is used to allocate memory objects, that too large, or not enough replicated
    +  * to use a dedicated KCM allocator.
       ******************************************************************************************/
      
  • trunk/kernel/mm/kmem.c

    (r612 → r619)

          else if( type == KMEM_CPU_CTX )       return CONFIG_CPU_CTX_SIZE;
          else if( type == KMEM_FPU_CTX )       return CONFIG_FPU_CTX_SIZE;
    -     else if( type == KMEM_BARRIER )       return sizeof( remote_barrier_t );
    - 
    +     else if( type == KMEM_GEN_BARRIER )   return sizeof( generic_barrier_t );
    + 
    +     else if( type == KMEM_SMP_BARRIER )   return sizeof( simple_barrier_t );
          else if( type == KMEM_DEVFS_CTX )     return sizeof( fatfs_ctx_t );
          else if( type == KMEM_FATFS_CTX )     return sizeof( fatfs_ctx_t );
     
          else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
          else if( type == KMEM_MUTEX )         return sizeof( remote_mutex_t );
    + 
          else if( type == KMEM_DIR )           return sizeof( user_dir_t );
    - 
              else if( type == KMEM_512_BYTES )     return 512;
      
     
              else if( type == KMEM_CPU_CTX )       return "KMEM_CPU_CTX";
              else if( type == KMEM_FPU_CTX )       return "KMEM_FPU_CTX";
    -         else if( type == KMEM_BARRIER )       return "KMEM_BARRIER";
    - 
    +         else if( type == KMEM_GEN_BARRIER )   return "KMEM_GEN_BARRIER";
    + 
    +     else if( type == KMEM_SMP_BARRIER )   return "KMEM_SMP_BARRIER";
          else if( type == KMEM_DEVFS_CTX )     return "KMEM_DEVFS_CTX";
          else if( type == KMEM_FATFS_CTX )     return "KMEM_FATFS_CTX";
     
          else if( type == KMEM_CONDVAR )       return "KMEM_CONDVAR";
          else if( type == KMEM_MUTEX )         return "KMEM_MUTEX";
    + 
          else if( type == KMEM_DIR )           return "KMEM_DIR";
    - 
              else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
      
  • trunk/kernel/mm/kmem.h

    (r611 → r619)

          KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                              */
          KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                              */
    -     KMEM_BARRIER          = 9,   /*! remote_barrier_t                               */
    +     KMEM_GEN_BARRIER      = 9,   /*! generi_cbarrier_t                              */
      
    -     KMEM_DEVFS_CTX        = 10,  /*! fatfs_inode_t                                  */
    -     KMEM_FATFS_CTX        = 11,  /*! fatfs_ctx_t                                    */
    -     KMEM_VFS_CTX          = 12,  /*! vfs_context_t                                  */
    -     KMEM_VFS_INODE        = 13,  /*! vfs_inode_t                                    */
    -     KMEM_VFS_DENTRY       = 14,  /*! vfs_dentry_t                                   */
    -     KMEM_VFS_FILE         = 15,  /*! vfs_file_t                                     */
    -     KMEM_SEM              = 16,  /*! remote_sem_t                                   */
    -     KMEM_CONDVAR          = 17,  /*! remote_condvar_t                               */
    -     KMEM_MUTEX            = 18,  /*! remote_mutex_t                                 */
    -     KMEM_DIR              = 19,  /*! remote_dir_t                                   */
    +     KMEM_SMP_BARRIER      = 10,  /*! simple_barrier_t                               */
    +     KMEM_DEVFS_CTX        = 11,  /*! fatfs_inode_t                                  */
    +     KMEM_FATFS_CTX        = 12,  /*! fatfs_ctx_t                                    */
    +     KMEM_VFS_CTX          = 13,  /*! vfs_context_t                                  */
    +     KMEM_VFS_INODE        = 14,  /*! vfs_inode_t                                    */
    +     KMEM_VFS_DENTRY       = 15,  /*! vfs_dentry_t                                   */
    +     KMEM_VFS_FILE         = 16,  /*! vfs_file_t                                     */
    +     KMEM_SEM              = 17,  /*! remote_sem_t                                   */
    +     KMEM_CONDVAR          = 18,  /*! remote_condvar_t                               */
    +     KMEM_MUTEX            = 19,  /*! remote_mutex_t                                 */
      
    -     KMEM_512_BYTES        = 20,  /*! 512 bytes aligned                              */
    +     KMEM_DIR              = 20,  /*! remote_dir_t                                   */
    +     KMEM_512_BYTES        = 21,  /*! 512 bytes aligned                              */
      
    -     KMEM_TYPES_NR         = 21,
    +     KMEM_TYPES_NR         = 22,
      };
      
     
       *************************************************************************************
       * @ req   : local pointer to allocation request.
    -  * @ return a local pointer on page descriptor if PPM (i.e. type KMEM_PAGE).
    +  * @ return a local pointer on page descriptor if KMEM_PAGE.
       *   return a local pointer to allocated buffer if KCM or KHM.
       *   return NULL if no physical memory available.
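
    For orientation, a hedged usage sketch of the kmem_alloc() interface documented above;
    the kmem_req_t field names (type, size, flags) and the AF_KERNEL / AF_ZERO flags are
    assumptions about the rest of kmem.h, not shown in this changeset:

        kmem_req_t    req;      // allocation request (field names assumed)
        page_t      * page;     // page descriptor returned for KMEM_PAGE
        vfs_inode_t * inode;    // buffer returned for a KCM-managed type

        // request one physical page from the PPM
        req.type  = KMEM_PAGE;
        req.size  = 0;                       // order 0 : one single page (assumed meaning)
        req.flags = AF_KERNEL | AF_ZERO;
        page = kmem_alloc( &req );

        // request a fixed-size object (here a VFS inode) from the relevant KCM
        req.type  = KMEM_VFS_INODE;
        req.flags = AF_KERNEL | AF_ZERO;
        inode = kmem_alloc( &req );

        // both calls return NULL when no physical memory is available
        if( (page == NULL) || (inode == NULL) ) { /* handle allocation failure */ }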
  • trunk/kernel/mm/vmm.c

    (r617 → r619)

          // scan the VSL to delete all registered vsegs
          // (don't use a FOREACH for item deletion in xlist)
    -         while( !xlist_is_empty( root_xp ) )
    + 
    + uint32_t count = 0;
    + 
    +         while( !xlist_is_empty( root_xp ) && (count < 10 ) )
              {
              // get pointer on first vseg in VSL
     
      __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
      #endif
    + 
    + count++;
      
              }
     
      
      #if DEBUG_VMM_ALLOCATE_PAGE
    - uint32_t   cycle = (uint32_t)hal_get_cycles();
    - thread_t * this  = CURRENT_THREAD;
    + uint32_t   cycle   = (uint32_t)hal_get_cycles();
    + thread_t * this    = CURRENT_THREAD;
    + xptr_t     this_xp = XPTR( local_cxy , this );
      if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
      printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     
      
      #if DEBUG_VMM_HANDLE_COW
    - uint32_t   cycle = (uint32_t)hal_get_cycles();
    - thread_t * this  = CURRENT_THREAD;
    + uint32_t   cycle   = (uint32_t)hal_get_cycles();
    + thread_t * this    = CURRENT_THREAD;
    + xptr_t     this_xp = XPTR( local_cxy , this );
      if( DEBUG_VMM_HANDLE_COW < cycle )
      printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    - __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
    + __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
      #endif
      
     
          }
      
    + #if( DEBUG_VMM_HANDLE_COW & 1)
    + if( DEBUG_VMM_HANDLE_COW < cycle )
    + printk("\n[%s] thread[%x,%x] get vseg for vpn %x\n",
    + __FUNCTION__, this->process->pid, this->trdid, vpn );
    + #endif
    + 
          // get reference GPT cluster and local pointer
          ref_cxy = GET_CXY( process->ref_xp );
     
                           &old_ppn );
      
    + #if( DEBUG_VMM_HANDLE_COW & 1)
    + if( DEBUG_VMM_HANDLE_COW < cycle )
    + printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
    + __FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
    + #endif
    + 
          // the PTE must be mapped for a COW
          if( (old_attr & GPT_MAPPED) == 0 )
     
      
              // release GPT lock in write mode
    -         remote_rwlock_wr_acquire( gpt_lock_xp );
    +         remote_rwlock_wr_release( gpt_lock_xp );
      
              return EXCP_KERNEL_PANIC;
          }
      
    -     // get extended pointer, cluster and local pointer on physical page descriptor
    +     // get pointers on physical page descriptor
          xptr_t   page_xp  = ppm_ppn2page( old_ppn );
          cxy_t    page_cxy = GET_CXY( page_xp );
     
          uint32_t forks = hal_remote_l32( forks_xp );
      
    + #if( DEBUG_VMM_HANDLE_COW & 1)
    + if( DEBUG_VMM_HANDLE_COW < cycle )
    + printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
    + __FUNCTION__, this->process->pid, this->trdid, forks, vpn );
    + #endif
    + 
          if( forks )        // pending fork => allocate a new page, and copy old to new
          {
    -         // allocate a new physical page
    +         // decrement pending forks counter in page descriptor
    +         hal_remote_atomic_add( forks_xp , -1 );
    + 
    +         // release lock protecting "forks" counter
    +         remote_busylock_release( forks_lock_xp );
    + 
    +         // allocate a new page
              page_xp = vmm_page_allocate( vseg , vpn );
    + 
              if( page_xp == XPTR_NULL )
              {
     
                  remote_rwlock_wr_acquire( gpt_lock_xp );
      
    -             // release lock protecting "forks" counter
    -             remote_busylock_release( forks_lock_xp );
    - 
                  return EXCP_KERNEL_PANIC;
              }
     
              new_ppn = ppm_page2ppn( page_xp );
      
    + #if( DEBUG_VMM_HANDLE_COW & 1)
    + if( DEBUG_VMM_HANDLE_COW < cycle )
    + printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
    + __FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
    + #endif
    + 
              // copy old page content to new page
    -         xptr_t  old_base_xp = ppm_ppn2base( old_ppn );
    -         xptr_t  new_base_xp = ppm_ppn2base( new_ppn );
    -         memcpy( GET_PTR( new_base_xp ),
    -                 GET_PTR( old_base_xp ),
    -                 CONFIG_PPM_PAGE_SIZE );
    - 
    -         // decrement pending forks counter in page descriptor
    -         hal_remote_atomic_add( forks_xp , -1 );
    +         hal_remote_memcpy( ppm_ppn2base( new_ppn ),
    +                            ppm_ppn2base( old_ppn ),
    +                            CONFIG_PPM_PAGE_SIZE );
      
      #if(DEBUG_VMM_HANDLE_COW & 1)
      if( DEBUG_VMM_HANDLE_COW < cycle )
    - printk("\n[%s] thread[%x,%x] : pending forks => allocate a new PPN %x\n",
    - __FUNCTION__, process->pid, this->trdid, new_ppn );
    + printk("\n[%s] thread[%x,%x] copied old page to new page\n",
    + __FUNCTION__, this->process->pid, this->trdid );
      #endif
      
     
          else               // no pending fork => keep the existing page
          {
    +         // release lock protecting "forks" counter
    +         remote_busylock_release( forks_lock_xp );
      
      #if(DEBUG_VMM_HANDLE_COW & 1)
      if( DEBUG_VMM_HANDLE_COW < cycle )
    - printk("\n[%s] thread[%x,%x]  no pending forks => keep existing PPN %x\n",
    - __FUNCTION__, process->pid, this->trdid, new_ppn );
    + printk("\n[%s] thread[%x,%x]  no pending forks / keep existing PPN %x\n",
    + __FUNCTION__, this->process->pid, this->trdid, old_ppn );
      #endif
              new_ppn = old_ppn;
          }
    - 
    -     // release lock protecting "forks" counter
    -     remote_busylock_release( forks_lock_xp );
      
          // build new_attr : reset COW and set WRITABLE,
          new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
      
    -     // update the relevan GPT
    +     // update the relevant GPT
          // - private vseg => update local GPT
          // - public vseg => update all GPT copies
     
      if( DEBUG_VMM_HANDLE_COW < cycle )
      printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
    - __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
    + __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
      #endif
      
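
    Putting the vmm_handle_cow() hunks together, the control flow after r619 can be
    summarised as follows; this is a condensed sketch of the fixed sequence, not a
    verbatim extract of the function:

        // old_attr / old_ppn were read from the GPT under the write lock,
        // and the "forks" counter was read under the page busylock
        if( forks )     // other process copies still reference the old page
        {
            hal_remote_atomic_add( forks_xp , -1 );      // one pending fork served
            remote_busylock_release( forks_lock_xp );    // released before allocation
            page_xp = vmm_page_allocate( vseg , vpn );   // allocate a private copy
            new_ppn = ppm_page2ppn( page_xp );
            hal_remote_memcpy( ppm_ppn2base( new_ppn ),  // replicate page content,
                               ppm_ppn2base( old_ppn ),  // possibly across clusters
                               CONFIG_PPM_PAGE_SIZE );
        }
        else            // last reference : reuse the existing page
        {
            remote_busylock_release( forks_lock_xp );
            new_ppn = old_ppn;
        }

        // make the page writable and clear the COW flag in the relevant GPT(s)
        new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);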