Changeset 50 for trunk/kernel


Ignore:
Timestamp:
Jun 26, 2017, 3:15:11 PM (7 years ago)
Author:
alain
Message:

bloup

Location:
trunk/kernel
Files:
1 added
1 deleted
23 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/devices/dev_ioc.c

    r23 r50  
    2222 */
    2323
     24#include <hard_config.h>
    2425#include <kernel_config.h>
    2526#include <hal_types.h>
     
    130131#if USE_IOB    // software L2/L3 cache coherence for memory buffer
    131132
    132     if ( type == IOC_READ )  dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
    133     else                     dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
     133    if ( cmd_type == IOC_READ )  dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );
     134    else                         dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 );
    134135
    135136#endif     // end software L2/L3 cache coherence
  • trunk/kernel/drivers/soclib/soclib_mmc.c

    r4 r50  
    8585
    8686        // get command type
    87         uint32_t cc_cmd = MMC_CC_INVAL ? SOCLIB_MMC_CC_INVAL : SOCLIB_MMC_CC_SYNC;
     87        uint32_t cc_cmd;
     88        if( type == MMC_CC_INVAL )  cc_cmd = SOCLIB_MMC_CC_INVAL;
     89        else                        cc_cmd = SOCLIB_MMC_CC_SYNC;
    8890
    8991        // set SOCLIB_MMC registers to start INVAL/SYNC operation
  • trunk/kernel/kern/cluster.c

    r23 r50  
    2828#include <hal_atomic.h>
    2929#include <hal_special.h>
     30#include <hal_ppm.h>
    3031#include <printk.h>
    3132#include <errno.h>
     
    4344#include <dqdt.h>
    4445
    45 // TODO #include <sysfs.h>
    46 
    4746///////////////////////////////////////////////////////////////////////////////////////////
    4847// Extern global variables
     
    6261error_t cluster_init( struct boot_info_s * info )
    6362{
     63    error_t     error;
    6464    lpid_t      lpid;     // local process_index
    6565    lid_t       lid;      // local core index
     
    8282        spinlock_init( &cluster->kcm_lock );
    8383
     84    cluster_dmsg("\n[INFO] %s for cluster %x enters\n",
     85                 __FUNCTION__ , local_cxy );
     86
    8487    // initialises DQDT
    8588    cluster->dqdt_root_level = dqdt_init( info->x_size,
     
    9093
    9194    // initialises embedded PPM
    92         ppm_init( &cluster->ppm,
    93               info->pages_nr,
    94               info->pages_offset );
     95        error = hal_ppm_init( info );
     96
     97    if( error )
     98    {
     99        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
     100               __FUNCTION__ , local_cxy );
     101        return ENOMEM;
     102    }
     103
     104    cluster_dmsg("\n[INFO] %s : PPM initialized in cluster %x at cycle %d\n",
     105                 __FUNCTION__ , local_cxy , hal_time_stamp() );
    95106
    96107    // initialises embedded KHM
    97108        khm_init( &cluster->khm );
    98109
     110    cluster_dmsg("\n[INFO] %s : KHM initialized in cluster %x at cycle %d\n",
     111                 __FUNCTION__ , local_cxy , hal_time_stamp() );
     112
    99113    // initialises embedded KCM
    100114        kcm_init( &cluster->kcm , KMEM_KCM );
     115
     116    cluster_dmsg("\n[INFO] %s : KCM initialized in cluster %x at cycle %d\n",
     117                 __FUNCTION__ , local_cxy , hal_time_stamp() );
    101118
    102119    // initialises all cores descriptors
     
    108125        }
    109126
     127    cluster_dmsg("\n[INFO] %s : cores initialized in cluster %x at cycle %d\n",
     128                 __FUNCTION__ , local_cxy , hal_time_stamp() );
     129
    110130    // initialises RPC fifo
    111131        rpc_fifo_init( &cluster->rpc_fifo );
     132
     133    cluster_dmsg("\n[INFO] %s : RPC fifo initialized in cluster %x at cycle %d\n",
     134                 __FUNCTION__ , local_cxy , hal_time_stamp() );
    112135
    113136    // initialise pref_tbl[] in process manager
     
    132155        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    133156    }
     157
     158    cluster_dmsg("\n[INFO] %s Process Manager initialized in cluster %x at cycle %d\n",
     159                 __FUNCTION__ , local_cxy , hal_time_stamp() );
    134160
    135161    hal_wbflush();
  • trunk/kernel/kern/cluster.h

    r23 r50  
    133133
    134134    pmgr_t            pmgr;            /*! embedded process manager                       */
    135 
    136         char              name[CONFIG_SYSFS_NAME_LEN];
    137 
    138 //      sysfs_entry_t     node;
    139135}
    140136cluster_t;
  • trunk/kernel/kern/do_syscall.c

    r23 r50  
    8181    sys_chmod,              // 31
    8282    sys_signal,             // 32
    83     sys_gettimeofday,       // 33
     83    sys_timeofday,          // 33
    8484    sys_kill,               // 34
    8585    sys_getpid,             // 35
  • trunk/kernel/kern/kernel_init.c

    r25 r50  
    5555#include <devfs.h>
    5656
    57 // TODO #include <sysfs.h>
    5857
    5958#define KERNEL_INIT_SYNCHRO  0xA5A5B5B5
     
    115114__attribute__((section(".kdata")))
    116115barrier_t            local_barrier                           CONFIG_CACHE_LINE_ALIGNED;
     116
     117// This variable defines the array of supported File System contexts
     118__attribute__((section(".kdata")))
     119vfs_ctx_t            fs_context[FS_TYPES_NR]                 CONFIG_CACHE_LINE_ALIGNED;
     120
    117121
    118122///////////////////////////////////////////////////////////////////////////////////////////
     
    293297    }
    294298
    295     kinit_dmsg("\n[INFO] %s : core[%x][0] creates ICU chdev at cycle %d\n",
     299    kinit_dmsg("\n[INFO] %s : core[%x][0] created ICU chdev at cycle %d\n",
    296300               __FUNCTION__ , local_cxy , hal_time_stamp() );
    297301
     
    335339        }
    336340
    337         kinit_dmsg("\n[INFO] %s : core[%x][0] creates MMC chdev at cycle %d\n",
     341        kinit_dmsg("\n[INFO] %s : core[%x][0] created MMC chdev at cycle %d\n",
    338342                   __FUNCTION__ , local_cxy , hal_time_stamp() );
    339343    }
     
    370374            chdev_dir.dma[channel] = chdev_xp;
    371375
    372             kinit_dmsg("\n[INFO] %s : core[%x][0] creates DMA[%d] chdev at cycle %d\n",
     376            kinit_dmsg("\n[INFO] %s : core[%x][0] created DMA[%d] chdev at cycle %d\n",
    373377                       __FUNCTION__ , local_cxy , channel , hal_time_stamp() );
    374378        }
     
    395399//
    396400// TODO check that cluster IO contains a PIC [AG]
     401// TODO make a default initialisation for the chdev_dir structure (XPTR_NULL )  [AG]
    397402///////////////////////////////////////////////////////////////////////////////////////////
    398403// @ info    : pointer on the local boot-info structure.
     
    713718    // CP0 allocates one WTI mailbox per core for Inter Processor Interrupt
    714719    // this must be done after ICU chdev initialisation, by CP0 only, and before
    715     // external devices initialisation to enforce the rule (wti_id == lid)
     720    // external devices initialisation to enforce the rule :
     721    // "The WTI index for the IPI routed to core[lid] is lid"
    716722    if( core_lid == 0 )
    717723    {
     
    733739    }
    734740
    735     // CP0 contribute to initialise external peripheral chdev descriptors.
     741    // All CP0s contribute to initialise external peripheral chdev descriptors.
    736742    // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0),
    737743    // and allocates memory for the chdev descriptors that must be placed
     
    779785    }
    780786
    781 printk("\n bloup 0\n");
    782 
    783787    // CP0 in all clusters initializes cooperatively VFS and DEVFS
    784788    if( (core_lid == 0)  )
     
    786790        xptr_t  root_inode_xp;
    787791
    788         // initialize root File System
     792        // initialize root File System (must be FATFS in this implementation)
    789793        if( CONFIG_VFS_ROOT_IS_FATFS )
    790794        {
     
    804808        }
    805809
    806 printk("\n bloup 1\n");
    807 
    808810        // mount the DEVFS File system
    809811            devfs_mount( root_inode_xp , "dev" );
    810812    }
    811 
    812 printk("\n bloup 2\n");
    813813
    814814    // CP0 in I/O cluster print banner
     
    844844
    845845    // each core jump to idle thread
    846 //    asm volatile( "j thread_idle_func\n" );
     846    thread_idle_func();
    847847
    848848} // end kernel_init()
  • trunk/kernel/kern/printk.h

    r23 r50  
    100100///////////////////////////////////////////////////////////////////////////////////
    101101
     102#if CONFIG_CLUSTER_DEBUG
     103#define cluster_dmsg(...)   printk(__VA_ARGS__)
     104#else
     105#define cluster_dmsg(...)
     106#endif
     107
    102108#if CONFIG_CONTEXT_DEBUG
    103109#define context_dmsg(...)   printk(__VA_ARGS__)
     
    112118#endif
    113119
     120#if CONFIG_DEVFS_DEBUG
     121#define devfs_dmsg(...)   printk(__VA_ARGS__)
     122#else
     123#define devfs_dmsg(...)
     124#endif
     125
     126#if CONFIG_DMA_DEBUG
     127#define dma_dmsg(...)   printk(__VA_ARGS__)
     128#else
     129#define dma_dmsg(...)
     130#endif
     131
    114132#if CONFIG_DQDT_DEBUG
    115133#define dma_dmsg(...)   printk(__VA_ARGS__)
     
    136154#endif
    137155
     156#if CONFIG_FATFS_DEBUG
     157#define fatfs_dmsg(...)   printk(__VA_ARGS__)
     158#else
     159#define fatfs_dmsg(...)
     160#endif
     161
    138162#if CONFIG_FBF_DEBUG
    139163#define fbf_dmsg(...)   printk(__VA_ARGS__)
     
    152176#else
    153177#define icu_dmsg(...)
     178#endif
     179
     180#if CONFIG_IDLE_DEBUG
     181#define idle_dmsg(...) printk(__VA_ARGS__)
     182#else
     183#define idle_dmsg(...)
    154184#endif
    155185
  • trunk/kernel/kern/thread.c

    r23 r50  
    749749    while( 1 )
    750750    {
    751         thread_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
     751        idle_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
    752752                    __FUNCTION__ , local_cxy , lid , hal_time_stamp() );
    753753
     
    755755        hal_core_sleep();
    756756
    757         thread_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
     757        idle_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
    758758                    __FUNCTION__ , local_cxy , lid , hal_time_stamp() );
    759759
  • trunk/kernel/libk/remote_rwlock.c

    r23 r50  
    2626#include <hal_irqmask.h>
    2727#include <thread.h>
     28#include <printk.h>
    2829#include <cluster.h>
    2930#include <scheduler.h>
     
    185186
    186187    // compute extended pointers on lock->ticket, lock->owner and thread->remote_locks
    187     xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->ticket );
     188    xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    188189    xptr_t              owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
    189190    xptr_t              locks_xp   = XPTR( thread_cxy , &thread_ptr->remote_locks );
     
    202203}  // end remote_rwlock_wr_unlock()
    203204
    204 
     205///////////////////////////////////////////
     206void remote_rwlock_print( xptr_t   lock_xp,
     207                          char   * comment )
     208{
     209    uint32_t     ticket;                // first free ticket index
     210    uint32_t     current;               // ticket index of current owner
     211    uint32_t     count;                 // current number of reader threads
     212    xptr_t       owner;                 // extended pointer on writer thread
     213
     214    // get cluster and local pointer on remote_rwlock
     215    remote_rwlock_t * lock_ptr = (remote_rwlock_t *)GET_PTR( lock_xp );
     216    cxy_t             lock_cxy = GET_CXY( lock_xp );
     217
     218    ticket  = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->ticket ) );
     219    current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) );
     220    count   = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) );
     221    owner   = hal_remote_lwd( XPTR( lock_cxy , &lock_ptr->owner ) );
     222
     223    printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d / owner = %l\n",
     224           lock_xp , comment , ticket , current , count , owner );
     225
     226}  // end remote_rwlock_print()
     227
  • trunk/kernel/libk/remote_rwlock.h

    r23 r50  
    9292void remote_rwlock_wr_unlock( xptr_t lock_xp );
    9393
     94/***************************************************************************************
     95 * Display the lock state on kernel TTY.
     96 ***************************************************************************************
     97 * @ lock_xp    : extended pointer on the remote rwlock
     98 * @ comment    : comment to be printed.
     99 **************************************************************************************/
     100void remote_rwlock_print( xptr_t   lock_xp,
     101                          char   * comment );
     102
    94103#endif
  • trunk/kernel/libk/xhtab.c

    r23 r50  
    143143                      xptr_t   xlist_xp )
    144144{
    145 
    146 printk("\n                @@@ xhtab_insert : 0 / name = %s / xhtab_xp = %l / xlist_xp = %l\n",
    147        key , xhtab_xp , xlist_xp );
    148 
    149145    // get xhtab cluster and local pointer
    150146    cxy_t     xhtab_cxy = GET_CXY( xhtab_xp );
     
    154150        uint32_t index = xhtab_ptr->index( key );
    155151
    156 printk("\n                @@@ xhtab_insert : 1 / name = %s / index = %d\n",
    157        key , index );
    158 
    159152    // take the lock protecting hash table
    160153    remote_rwlock_wr_lock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
     
    168161        remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    169162
    170 printk("\n                @@@ xhtab_insert : 2 / name = %s / item_xp = %l\n",
    171        key , item_xp );
    172 
    173163        return EINVAL;
    174164    }
     
    183173        // release the lock protecting hash table
    184174        remote_rwlock_wr_unlock( XPTR( xhtab_cxy , &xhtab_ptr->lock ) );
    185 
    186 printk("\n                @@@ xhtab_insert : 3 / name = %s / item_xp = %l\n",
    187        key , xhtab_ptr->scan( xhtab_xp , index , key ) );
    188175
    189176        return 0;
  • trunk/kernel/mm/kcm.c

    r20 r50  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner    (2016)
     5 *         Alain Greiner    (2016,2017)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4141// It changes the page status if required.
    4242//////////////////////////////////////////////////////////////////////////////////////
    43 // @ kcm   : pointer on kcm allocator.
    44 // @ ptr  : pointer on active kcm page to use.
     43// @ kcm      : pointer on kcm allocator.
     44// @ kcm_page : pointer on active kcm page to use.
    4545/////////////////////////////////////////////////////////////////////////////////////
    4646static void * kcm_get_block( kcm_t      * kcm,
    47                              kcm_page_t * page )
    48 {
    49         assert( page->active , __FUNCTION__ , "kcm page should be active" );
     47                             kcm_page_t * kcm_page )
     48{
     49        kcm_dmsg("\n[INFO] %s : enters for %s / page %x / count = %d / active = %d\n",
     50                 __FUNCTION__ , kmem_type_str( kcm->type ) ,
     51             (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
     52
     53        assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );
    5054
    5155        // get first block available
    52         int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );
    53 
    54         assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );
     56        int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr );
     57
     58        assert( (index != -1) , __FUNCTION__ , "kcm_page should not be full" );
    5559
    5660        // allocate block
    57         bitmap_clear( page->bitmap , index );
    58 
    59         // increase page refcount
    60         page->refcount ++;
    61 
    62         // change the page to busy no more free block in page
    63         if( page->refcount >= kcm->blocks_nr )
    64         {
    65                 page->active = 0;
    66                 list_unlink( &page->list);
     61        bitmap_clear( kcm_page->bitmap , index );
     62
     63        // increase kcm_page count
     64        kcm_page->count ++;
     65
     66        // change the kcm_page to busy if no more free block in page
     67        if( kcm_page->count >= kcm->blocks_nr )
     68        {
     69        kcm_page->active = 0;
     70                list_unlink( &kcm_page->list);
    6771                kcm->active_pages_nr --;
    6872
    69                 list_add_first( &kcm->busy_root , &page->list);
     73                list_add_first( &kcm->busy_root , &kcm_page->list);
    7074                kcm->busy_pages_nr ++;
    71                 page->busy   = 1;
    72         }
    73 
    74         return (page->base + index * kcm->block_size );
     75                kcm_page->busy = 1;
     76        }
     77
     78    // compute return pointer
     79    void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE
     80                 + (index * kcm->block_size) );
     81
     82        kcm_dmsg("\n[INFO] %s : allocated one block  %s / ptr = %x / page = %x / count = %d\n",
     83                 __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr ,
     84             (intptr_t)kcm_page , kcm_page->count );
     85
     86        return ptr;
    7587
    7688}  // kcm_get_block()
     
    7890/////////////////////////////////////////////////////////////////////////////////////
    7991// This static function releases a previously allocated block.
    80 // It changes the page status if required.
     92// It changes the kcm_page status if required.
    8193/////////////////////////////////////////////////////////////////////////////////////
    8294// @ kcm   : pointer on kcm allocator.
     
    8698                            void  * ptr )
    8799{
    88         kcm_page_t * page;
     100        kcm_page_t * kcm_page;
    89101        uint32_t     index;
    90102
    91         page = (kcm_page_t*)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    92         index = ((uint8_t*)ptr - page->base) / kcm->block_size;
    93 
    94         bitmap_set( page->bitmap , index );
    95         page->refcount --;
     103    // compute pointer on kcm_page from block pointer
     104        kcm_page = (kcm_page_t*)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
     105
     106    // compute block index from block pointer
     107        index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size;
     108
     109        bitmap_set( kcm_page->bitmap , index );
     110        kcm_page->count --;
    96111
    97112        // change the page to active if it was busy
    98         if( page->busy )
    99         {
    100                 page->busy = 0;
    101                 list_unlink( &page->list );
     113        if( kcm_page->busy )
     114        {
     115                kcm_page->busy = 0;
     116                list_unlink( &kcm_page->list );
    102117                kcm->busy_pages_nr --;
    103118
    104                 list_add_last( &kcm->active_root, &page->list );
     119                list_add_last( &kcm->active_root, &kcm_page->list );
    105120                kcm->active_pages_nr ++;
    106                 page->active = 1;
    107         }
    108 
    109         // change the page to free if last block in active page
    110         if( (page->active) && (page->refcount == 0) )
    111         {
    112                 page->active = 0;
    113                 list_unlink( &page->list);
     121                kcm_page->active = 1;
     122        }
     123
     124        // change the kcm_page to free if last block in active page
     125        if( (kcm_page->active) && (kcm_page->count == 0) )
     126        {
     127                kcm_page->active = 0;
     128                list_unlink( &kcm_page->list);
    114129                kcm->active_pages_nr --;
    115130
    116                 list_add_first( &kcm->free_root , &page->list);
     131                list_add_first( &kcm->free_root , &kcm_page->list);
    117132                kcm->free_pages_nr ++;
    118133        }
     
    121136/////////////////////////////////////////////////////////////////////////////////////
    122137// This static function allocates one page from PPM. It initializes
    123 // the KCM-page descriptor, and introduces the new page into freelist.
     138// the kcm_page descriptor, and introduces the new kcm_page into freelist.
    124139/////////////////////////////////////////////////////////////////////////////////////
    125140static error_t freelist_populate( kcm_t * kcm )
    126141{
    127142        page_t     * page;
    128         kcm_page_t * ptr;
     143        kcm_page_t * kcm_page;
    129144        kmem_req_t   req;
    130145
     
    143158
    144159        // get page base address
    145         ptr = ppm_page2base( page );
     160        kcm_page = (kcm_page_t *)ppm_page2base( page );
    146161
    147162        // initialize KCM-page descriptor
    148         bitmap_set_range( ptr->bitmap , 0 , kcm->blocks_nr );
    149 
    150         ptr->busy          = 0;
    151         ptr->active        = 0;
    152         ptr->refcount      = 0;
    153         ptr->base          = (uint8_t*)ptr + kcm->block_size;
    154         ptr->kcm           = kcm;
    155         ptr->page          = page;
     163        bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr );
     164
     165        kcm_page->busy          = 0;
     166        kcm_page->active        = 0;
     167        kcm_page->count      = 0;
     168        kcm_page->kcm           = kcm;
     169        kcm_page->page          = page;
    156170
    157171        // introduce new page in free-list
    158         list_add_first( &kcm->free_root , &ptr->list );
     172        list_add_first( &kcm->free_root , &kcm_page->list );
    159173        kcm->free_pages_nr ++;
    160174
     
    170184{
    171185        error_t      error;
    172         kcm_page_t * page;
     186        kcm_page_t * kcm_page;
    173187
    174188        // get a new page from PPM if freelist empty
     
    179193        }
    180194
    181         // get first KCM page from freelist and change its status to active
    182         page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
    183         list_unlink( &page->list );
     195        // get first KCM page from freelist and unlink it
     196        kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
     197        list_unlink( &kcm_page->list );
    184198        kcm->free_pages_nr --;
    185199
    186         return page;
     200        return kcm_page;
    187201
    188202} // freelist_get()
     
    193207                   uint32_t   type )
    194208{
    195         uint32_t     blocks_nr;
    196         uint32_t     block_size;
    197         uint32_t     remaining;
     209    // the kcm_page descriptor must fit in the KCM slot
     210    assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) ,
     211             __FUNCTION__ , "KCM slot too small\n" );
    198212
    199213        // initialize lock
     
    211225        list_root_init( &kcm->active_root );
    212226
    213         // initialize block size and number of blocks per page
    214         block_size      = ARROUND_UP( kmem_type_size( type ) , 64 );
    215         blocks_nr       = CONFIG_PPM_PAGE_SIZE / block_size;
    216         remaining       = CONFIG_PPM_PAGE_SIZE % block_size;
    217         blocks_nr       = (remaining >= sizeof(kcm_page_t)) ? blocks_nr : blocks_nr - 1;
    218 
    219         kcm->blocks_nr  = blocks_nr;
     227        // initialize block size
     228        uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE );
    220229        kcm->block_size = block_size;
    221230
     231        // initialize number of blocks per page
     232        uint32_t  blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size;
     233    kcm->blocks_nr = blocks_nr;
     234
    222235        kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
    223                  __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );
     236                 __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );
    224237
    225238}  // kcm_init()
     
    228241void kcm_destroy( kcm_t * kcm )
    229242{
    230         kcm_page_t   * page;
     243        kcm_page_t   * kcm_page;
    231244        list_entry_t * iter;
    232245
     
    237250        LIST_FOREACH( &kcm->free_root , iter )
    238251        {
    239                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     252                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    240253                list_unlink( iter );
    241254                kcm->free_pages_nr --;
    242                 ppm_free_pages( page->page );
     255                ppm_free_pages( kcm_page->page );
    243256        }
    244257
     
    246259        LIST_FOREACH( &kcm->active_root , iter )
    247260        {
    248                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     261                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    249262                list_unlink( iter );
    250263                kcm->free_pages_nr --;
    251                 ppm_free_pages( page->page );
     264                ppm_free_pages( kcm_page->page );
    252265        }
    253266
     
    255268        LIST_FOREACH( &kcm->busy_root , iter )
    256269        {
    257                 page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
     270                kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );
    258271                list_unlink( iter );
    259272                kcm->free_pages_nr --;
    260                 ppm_free_pages( page->page );
     273                ppm_free_pages( kcm_page->page );
    261274        }
    262275
     
    269282void * kcm_alloc( kcm_t * kcm )
    270283{
    271         kcm_page_t * page;
     284        kcm_page_t * kcm_page;
    272285        void       * ptr = NULL;   // pointer on block
    273286
     
    278291        if( list_is_empty( &kcm->active_root ) )  // no active page => get one
    279292        {
    280                 kcm_dmsg("\n[INFO] %s : enters for type %s but no active page => get one\n",
    281                          __FUNCTION__ , kmem_type_str( kcm->type ) );
    282 
    283293                // get a page from free list
    284                 page = freelist_get( kcm );
    285                 if( page == NULL ) return NULL;
     294                kcm_page = freelist_get( kcm );
     295
     296                if( kcm_page == NULL ) return NULL;
    286297
    287298                // insert page in active list
    288                 list_add_first( &kcm->active_root , &page->list );
     299                list_add_first( &kcm->active_root , &kcm_page->list );
    289300                kcm->active_pages_nr ++;
    290                 page->active = 1;
    291         }
    292         else                     // get first page from active list
    293         {
    294                 kcm_dmsg("\n[INFO] %s : enters for type %s with an active page\n",
    295                          __FUNCTION__ , kmem_type_str( kcm->type ) );
    296 
     301            kcm_page->active = 1;
     302
     303        kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
     304                         __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() ,
     305                 (intptr_t)kcm_page , kcm_page->count );
     306
     307        }
     308        else                                    // get first page from active list
     309        {
    297310                // get page pointer from active list
    298                 page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     311                kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
     312
     313                kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
     314                         __FUNCTION__ , kmem_type_str( kcm->type ) , hal_time_stamp() , 
     315                 (intptr_t)kcm_page , kcm_page->count );
    299316        }
    300317
    301318        // get a block from selected active page
    302319        // cannot fail, as an active page cannot be full...
    303         ptr  = kcm_get_block( kcm , page );
     320        ptr  = kcm_get_block( kcm , kcm_page );
    304321
    305322        // release lock
    306         spinlock_unlock(&kcm->lock);
    307 
    308         kcm_dmsg("\n[INFO] %s : allocated one block of type %s / ptr = %x\n",
    309                  __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr );
     323        spinlock_unlock( &kcm->lock );
    310324
    311325        return ptr;
    312326
    313 }  // kcm_alloc()
     327}  // end kcm_alloc()
    314328
    315329///////////////////////////
    316330void kcm_free( void * ptr )
    317331{
    318         kcm_page_t * page;
     332        kcm_page_t * kcm_page;
    319333        kcm_t      * kcm;
    320334
    321         if( ptr == NULL ) return;
    322 
    323         page = (kcm_page_t *)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    324         kcm  = page->kcm;
     335        assert( (ptr != NULL) , __FUNCTION__ , "pointer cannot be NULL" );
     336
     337        kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);
     338        kcm      = kcm_page->kcm;
    325339
    326340        // get lock
     
    332346        // release lock
    333347        spinlock_unlock( &kcm->lock );
    334 }
     348
     349}  // end kcm_free()
    335350
    336351////////////////////////////
  • trunk/kernel/mm/kcm.h

    r23 r50  
    3636 * This structure defines a generic Kernel Cache Manager, that is a block allocator,
    3737 * for fixed size objects. It exists a specific KCM allocator for each object type.
    38  * The actual allocated block size is the smallest multiple of 64 bytes that can
    39  * contain one single object.
     38 * The actual allocated block size is the smallest multiple of the KCM slot, that
     39 * contain one single object. The KCM slot is typically 64 bytes, as it must be large
     40 * enough to store the kcm_page descriptor, defined below.
    4041 * The various KCM allocators themselves are not statically allocated in the cluster
    4142 * manager, but are dynamically allocated when required, using the embedded KCM
     
    4647{
    4748        spinlock_t           lock;             /*! protect exclusive access to allocator   */
    48         uint32_t             block_size;       /*! actual block size (bytes)               */
    49         uint32_t             blocks_nr;        /*! number of blocks per page               */
     49        uint32_t             block_size;       /*! rounded block size (bytes)              */
     50        uint32_t             blocks_nr;        /*! max number of blocks per page           */
    5051
    5152        list_entry_t         active_root;      /*! root of active pages list               */
     
    6465/****************************************************************************************
    6566 * This structure defines a KCM-page descriptor.
    66  * A KCM-page can contain up to (CONFIG_PPM_PAGE_SIZE / CONFIG_CACHE_LINE_SIZE) blocks.
     67 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) blocks.
    6768 * This kcm page descriptor is stored in the first slot of the page.
    6869 ***************************************************************************************/
     
    7071typedef struct kcm_page_s
    7172{
    72         uint32_t        bitmap[BITMAP_SIZE(CONFIG_KCM_BLOCKS_MAX)];
    73         uint8_t       * base;                  /*! pointer on first block in page          */
    74         kcm_t         * kcm;                   /*! owner KCM allocator                     */
     73        uint32_t        bitmap[2];             /*! at most 64 blocks in a single page      */
    7574        list_entry_t    list;                  /*! [active / busy / free] list member      */
     75    kcm_t         * kcm;                   /*! pointer on kcm allocator                */ 
    7676        page_t        * page;                  /*! pointer on the physical page descriptor */
    77         uint8_t         refcount;              /*! number of allocated blocks              */
    78         uint8_t         busy;                  /*! page busy if non zero                   */
    79         uint8_t         active;                /*! page active if non zero                 */
    80         uint8_t         unused;                /*!                                         */
     77        uint32_t        count;                 /*! number of allocated blocks              */
     78        uint32_t        busy;                  /*! page busy if non zero                   */
     79        uint32_t        active;                /*! page active if non zero                 */
    8180}
    8281kcm_page_t;
  • trunk/kernel/mm/kmem.c

    r23 r50  
    103103    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
    104104    else if( type == KMEM_CONDVAR )       return sizeof( remote_condvar_t );
     105
     106    else if( type == KMEM_512_BYTES )     return 512;
     107
    105108    else                                  return 0;
    106109}
     
    130133    else if( type == KMEM_SEM )           return "KMEM_SEM";
     131134    else if( type == KMEM_CONDVAR )       return "KMEM_CONDVAR";
     135
     136    else if( type == KMEM_512_BYTES )     return "KMEM_512_BYTES";
     137
    132138    else                                  return "undefined";
    133139}
     
    193199        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    194200
    195         kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s / size %d\n",
    196                       __FUNCTION__ , local_cxy , kmem_type_str( type ) , size );
     201        kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s\n",
     202                      __FUNCTION__ , local_cxy , kmem_type_str( type ) );
    197203
    198204    // analyse request type
     
    202208                ptr = (void *)ppm_alloc_pages( size );
    203209
    204         // reset page if required
     210        // reset page if requested
    205211                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
    206212
     
    217223                if( flags & AF_ZERO ) memset( ptr , 0 , size );
    218224
    219         kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
    220                   __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
     225        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
     226                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
     227                 (intptr_t)ptr , req->size );
    221228        }
    222229    else                                           // KCM allocator
     
    237244                if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
    238245
    239         kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
    240                   __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
     246        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
     247                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
     248                  (intptr_t)ptr , kmem_type_size( type ) );
    241249        }
    242250
  • trunk/kernel/mm/kmem.h

    r23 r50  
    5959  KMEM_CONDVAR          = 19,  /*! remote_condvar_t                                 */
    6060
    61   KMEM_TYPES_NR         = 19,
     61  KMEM_512_BYTES        = 20,  /*! 512 bytes aligned                                */
     62 
     63  KMEM_TYPES_NR         = 21,
    6264};
    6365
  • trunk/kernel/mm/ppm.c

    r18 r50  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016)
     5 *          Alain Greiner    (2016,2017)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4848}
    4949
    50 ////////////////////////////////////////////
    51 inline void * ppm_page2base( page_t * page )
     50
     51
     52/////////////////////////////////////////////
     53inline void * ppm_page2vaddr( page_t * page )
    5254{
    5355        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    54         return (void*)((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
    55 }
    56 
    57 ////////////////////////////////////////////
    58 inline page_t * ppm_base2page( void * base )
     56        return ppm->vaddr_base + ((page - ppm->pages_tbl) << CONFIG_PPM_PAGE_SHIFT);
     57}
     58
     59//////////////////////////////////////////////
     60inline page_t * ppm_vaddr2page( void * vaddr )
    5961{
    6062        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    61         return (ppm->pages_tbl + (((uint32_t)base ) >> CONFIG_PPM_PAGE_SHIFT));
    62 }
      63        return ppm->pages_tbl + ((vaddr - ppm->vaddr_base) >> CONFIG_PPM_PAGE_SHIFT);
     64}
     65
     66
    6367
    6468//////////////////////////////////////////
     
    7680}
    7781
     82
     83
    7884///////////////////////////////////////
    79 inline void * ppm_ppn2base( ppn_t ppn )
    80 {
    81         return (void*)( ppn << CONFIG_PPM_PAGE_SHIFT );
    82 }
    83 
    84 ////////////////////////////////////////
    85 inline ppn_t ppm_base2ppn( void * base )
    86 {
    87         return (ppn_t)( (uint32_t)base >> CONFIG_PPM_PAGE_SHIFT );
    88 }
    89 
    90 //////////////////////////////////////////////////
    91 static void ppm_free_pages_nolock( page_t * page )
     85inline void * ppm_ppn2vaddr( ppn_t ppn )
     86{
     87        ppm_t  * ppm  = &LOCAL_CLUSTER->ppm;
     88        return ppm->vaddr_base + (ppn << CONFIG_PPM_PAGE_SHIFT);
     89}
     90
     91//////////////////////////////////////////
     92inline ppn_t ppm_vaddr2ppn( void * vaddr )
     93{
     94        ppm_t  * ppm  = &LOCAL_CLUSTER->ppm;
      95        return  ( (vaddr - ppm->vaddr_base) >> CONFIG_PPM_PAGE_SHIFT );
     96}
     97
     98
     99
     100///////////////////////////////////////////
     101void ppm_free_pages_nolock( page_t * page )
    92102{
    93103        page_t   * buddy;            // searched buddy page descriptor
     
    95105        page_t   * current;          // current (merged) page descriptor
    96106        uint32_t   current_index;    // current (merged) page index
    97         uint32_t   current_order;    // current (merget) page order
     107        uint32_t   current_order;    // current (merged) page order
    98108
    99109    ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
     
    120130                list_unlink( &buddy->list );
    121131                ppm->free_pages_nr[current_order] --;
    122         ppm->total_free_pages -= (1 << current_order);
    123132
    124133        // merge buddy with current
     
    134143        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    135144        ppm->free_pages_nr[current_order] ++;
    136     ppm->total_free_pages += (1 << current_order);
    137145
    138146}  // end ppm_free_pages_nolock()
    139 
    140 //////////////////////////////
    141 void ppm_init( ppm_t    * ppm,
    142                uint32_t   pages_nr,        // total pages number
    143                uint32_t   pages_offset )   // occupied pages
    144 {
    145         uint32_t   i;
    146 
    147     // set signature
    148         ppm->signature = PPM_SIGNATURE;
    149 
    150     // initialize lock protecting the free_pages[] array
    151         spinlock_init( &ppm->free_lock );
    152 
    153     // initialize free_pages[] array as empty
    154         ppm->total_free_pages = 0;
    155         for( i = 0 ; i < CONFIG_PPM_MAX_ORDER ; i++ )
    156         {
    157                 list_root_init( &ppm->free_pages_root[i] );
    158                 ppm->free_pages_nr[i] = 0;
    159         }
    160 
    161     // initialize dirty_list as empty
    162     list_root_init( &ppm->dirty_root );
    163 
    164     // initialize pointer on page descriptors array
    165         ppm->pages_tbl = (page_t*)( pages_offset << CONFIG_PPM_PAGE_SHIFT );
    166 
    167     // compute size of pages descriptor array rounded to an integer number of pages
    168     uint32_t bytes = ARROUND_UP( pages_nr * sizeof(page_t), CONFIG_PPM_PAGE_SIZE );
    169 
    170     // compute number of pages required to store page descriptor array
    171         uint32_t pages_array  = bytes >> CONFIG_PPM_PAGE_SHIFT;
    172 
    173     // compute total number of reserved pages (kernel code & pages_tbl[])
    174         uint32_t reserved_pages = pages_offset + pages_array;
    175 
    176         // set pages numbers
    177         ppm->pages_nr      = pages_nr;
    178     ppm->pages_offset  = reserved_pages;
    179 
    180     // initialises all page descriptors in pages_tbl[]
    181         for( i = 0 ; i < pages_nr ; i++ )
    182     {
    183         page_init( &ppm->pages_tbl[i] );
    184 
    185         // TODO optimisation : make only a partial init [AG]
    186         // complete the initialisation when page is allocated [AG]
    187         // ppm->pages_tbl[i].flags = 0;
    188     }
    189 
    190     // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
    191     // - release all other pages to populate the free lists
    192         for( i = 0 ; i < reserved_pages ; i++)
    193     {
    194         page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
    195     }
    196         for( i = reserved_pages ; i < pages_nr ; i++ )
    197         {
    198             ppm_free_pages_nolock( &ppm->pages_tbl[i] );
    199 
    200         // TODO optimisation : decompose this enormous set of small pages
    201         // to a set big pages with various order values
    202         }
    203 
    204     // check consistency
    205     ppm_assert_order( ppm );
    206 
    207 } // end ppm_init()
    208147
    209148////////////////////////////////////////////
     
    216155    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    217156
    218         assert( (ppm->signature == PPM_SIGNATURE) , __FUNCTION__ , "PPM non initialised" );
    219 
    220157        assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );
    221158
     
    224161    ppm_dmsg("\n[INFO] %s : enters / order = %d\n",
    225162             __FUNCTION__ , order );
    226 
    227 #if( CONFIG_PPM_DEBUG )
    228 ppm_print( ppm , "before allocation" );
    229 #endif
    230163
    231164    // take lock protecting free lists
     
    252185
    253186    // update free-lists after removing a block
    254         ppm->total_free_pages -= (1 << current_order);
    255187        ppm->free_pages_nr[current_order] --;
    256188        current_size = (1 << current_order);
     
    268200                list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
    269201                ppm->free_pages_nr[current_order] ++;
    270         ppm->total_free_pages += (1 << current_order);
    271202        }
    272203
     
    282213             __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
    283214
    284 #if CONFIG_PPM_DEBUG
    285 ppm_print( ppm , "after allocation" );
    286 #endif
    287 
    288215        return block;
    289216}  // end pmm_alloc-pages()
     
    315242        spinlock_lock( &ppm->free_lock );
    316243
    317         printk("\n***  PPM state in cluster %x %s : pages = %d / offset = %d / free = %d ***\n",
    318                local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
     244        printk("\n***  PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
     245               local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );
    319246
    320247        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
  • trunk/kernel/mm/ppm.h

    r18 r50  
    3232#include <page.h>
    3333
    34 #define  PPM_SIGNATURE     0xBABEF00D
    3534
    3635/*****************************************************************************************
    3736 * This structure defines the Physical Memory Manager in a cluster.
    38  * In all clusters, the physical memory bank starts at address 0.
    39  * The segments kcode and kdata are mapped in the first "offset" pages.
    40  * The physical page descriptors array is implemented just after this offset zone.
    41  * The main service provided by the PMM is the dynamic allocation of physical pages.
     37 * In all clusters, the physical memory bank starts at local physical address 0.
     38 * The size of this local physical memory is defined by the <pages_nr> field in the
     39 * boot_info structure. It is split in three parts:
     40 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
     41 *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
     42 *   boot_info structure.
     43 * - the "pages_tbl" section contains the physical page descriptors array. It starts
     44 *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
      45 *   It is created and initialized by the hal_ppm_create() function.
      46 * - The "kernel_heap" section contains all physical pages that are not in the
      47 *   kernel_code and pages_tbl sections, and that have not been reserved by the
     48 *   architecture specific bootloader. The reserved pages are defined in the boot_info
     49 *   structure.
     50 *
     51 * The main service provided by the PMM is the dynamic allocation of physical pages
     52 * from the "kernel_heap" section.
    4253 * This low-level allocator implements the buddy algorithm: an allocated block is
    43  * is an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
     54 * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
    4455 ****************************************************************************************/
     56
    4557typedef struct ppm_s
    4658{
    47         uint32_t       signature;               /*! set when initialised                    */
    48         spinlock_t     free_lock;               /*! lock protecting free_pages[] array      */
     59        spinlock_t     free_lock;               /*! lock protecting free_pages[] lists      */
    4960        list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists      */
    5061        uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages    */
    51     uint32_t       total_free_pages;        /*! total number of free pages              */
    5262        page_t       * pages_tbl;               /*! pointer on page descriptors array       */
    53         uint32_t       pages_nr;                /*! total number of 4 Kbytes physical page  */
    54     uint32_t       pages_offset;            /*! allocated pages for kcode & kdata       */
    55     uint32_t       pages_desc;              /*! allocated pages for pages_tbl[] array   */
    56     spinlock_t     dirty_lock;              /*! lock protecting the dirty list          */
     63        uint32_t       pages_nr;                /*! total number of small physical page     */
     64    spinlock_t     dirty_lock;              /*! lock protecting the dirty pages list    */
    5765    list_entry_t   dirty_root;              /*! root of dirty pages list                */
     66    void         * vaddr_base;              /*! pointer on local physical memory base   */
    5867}
    5968ppm_t;
     
    8089 * @ order        : ln2( number of 4 Kbytes pages)
    8190 * @ returns a pointer on the page descriptor if success / NULL otherwise
    82  ****************************************************************************************/
      91 ****************************************************************************************/
    8392page_t * ppm_alloc_pages( uint32_t order );
    8493
     
    93102
    94103/*****************************************************************************************
    95  * This function check if a page descriptor is valid.
     104 * This function check if a page descriptor pointer is valid.
    96105 *****************************************************************************************
    97106 * @ page         : pointer on a page descriptor
     
    101110
    102111/*****************************************************************************************
    103  * Get the page base address from the page descriptor pointer.
     112 * Get the page virtual address from the page descriptor pointer.
    104113 *****************************************************************************************
    105114 * @ page         : pointer to page descriptor
    106  * @ returns page base address
     115 * @ returns virtual address of page itself.
    107116 ****************************************************************************************/
    108 inline void* ppm_page2base( page_t * page );
     117inline void* ppm_page2vaddr( page_t * page );
    109118
    110119/*****************************************************************************************
    111  * Get the page descriptor pointer from the page base address.
     120 * Get the page descriptor pointer from the page virtual address.
    112121 *****************************************************************************************
    113  * @ vaddr        : page base address
     122 * @ vaddr        : page virtual address
    114123 * @ returns pointer on page descriptor
    115124 ****************************************************************************************/
    116 inline page_t * ppm_base2page( void * vaddr );
     125inline page_t * ppm_vaddr2page( void * vaddr );
    117126
    118127/*****************************************************************************************
     
    133142
    134143/*****************************************************************************************
    135  * Get the page base address from the PPN.
     144 * Get the page virtual address from the PPN.
    136145 *****************************************************************************************
    137146 * @ ppn          : physical page number
    138  * @ returns page base address
     147 * @ returns page virtual address.
    139148 ****************************************************************************************/
    140 inline void* ppm_ppn2base( ppn_t ppn );
     149inline void* ppm_ppn2vaddr( ppn_t ppn );
    141150
    142151/*****************************************************************************************
    143  * Get the PPN from the page base address.
     152 * Get the PPN from the page virtual address.
    144153 *****************************************************************************************
    145  * @ vaddr        : page base address
    146  * @ returns physical page number
     154 * @ vaddr        : page virtual address
     155 * @ returns physical page number.
    147156 ****************************************************************************************/
    148 inline ppn_t ppm_base2ppn( void * base );
     157inline ppn_t ppm_vaddr2ppn( void * base );
    149158
    150159/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r23 r50  
    936936    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
    937937
    938     if( local_cxy == GET_CXY( process->ref_xp) )   // calling process is reference process
     938    if( local_cxy == GET_CXY( process->ref_xp) ) // calling process is reference process
    939939    {
    940940        error = vmm_get_pte( process, vpn , &attr , &ppn );
    941941    }
    942     else                                           // use a RPC
     942    else                                         // calling process is not reference process
    943943    {
    944944        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
  • trunk/kernel/syscalls/sys_thread_sleep.c

    r23 r50  
    3030int sys_thread_sleep()
    3131{
    32     thread_t * this = CURRENT_THREAD;
    33 
    3432    thread_dmsg("\n[INFO] %s : thread %x in process %x goes to sleep at cycle %d\n",
    35                 __FUNCTION__ , this->trdid , this->process->pid , hal_time_stamp() );
     33                __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_time_stamp() );
    3634
    3735    thread_block( CURRENT_THREAD , THREAD_BLOCKED_GLOBAL );
     
    3937
     4038    thread_dmsg("\n[INFO] %s : thread %x in process %x resume at cycle %d\n",
    41                 __FUNCTION__ , this->trdid , this->process->pid , hal_time_stamp() );
     39                __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_time_stamp() );
    4240
    4341        return 0;
  • trunk/kernel/syscalls/syscalls.h

    r23 r50  
    528528 * @ return 0 if success / returns -1 if failure.
    529529 ********************************************************************************************/
    530 int sys_gettimeofday( struct timeval  * tv,
    531                       struct timezone * tz );
     530int sys_timeofday( struct timeval  * tv,
     531                   struct timezone * tz );
    532532
    533533/*********************************************************************************************
  • trunk/kernel/vfs/devfs.c

    r23 r50  
    139139    xptr_t   new_inode_xp;
    140140
    141 printk("\n        @@@ devfs_chdev : 0 / name = %s\n", name );
    142 
     141    devfs_dmsg("\n[INFO] %s : create dentry for %s\n", __FUNCTION__ , name );
     142   
    143143    // create vfs_dentry in local cluster
    144144    error = vfs_dentry_create( FS_TYPE_DEVFS,
     
    146146                               parent,
    147147                               &new_dentry_xp );
    148 
    149 printk("\n        @@@ devfs_chdev : 1 / name = %s\n", name );
    150 
    151148    if ( error )
    152149    {
     
    156153    }
    157154
    158 printk("\n        @@@ devfs_chdev : 2 / name = %s\n", name );
    159 
     155    devfs_dmsg("\n[INFO] %s : create inode for %s\n", __FUNCTION__ , name );
     156   
    160157    // create vfs_inode in local cluster
    161158    uint32_t  attr   = 0;
     
    171168                              gid,
    172169                              &new_inode_xp );
    173 
    174 printk("\n        @@@ devfs_chdev : 3 / name = %s\n", name );
    175 
    176170    if( error )
    177171    {
     
    229223    ///// step 1 : all clusters initialize local DEVFS context  /////
    230224
    231 printk("\n    @@@ devfs_mount : 0 / name = %s\n", devfs_root_name );
    232 
    233225    devfs_ctx_init( vfs_ctx , parent_inode_xp );
    234226
    235227    ///// step 2 : cluster_0 creates DEVFS root    /////
    236 
    237 printk("\n    @@@ devfs_mount : 1 / name = %s\n", devfs_root_name );
    238228
    239229    if( local_cxy == 0 )
     
    242232                                parent_inode_xp,
    243233                                &root_inode_xp );
    244 printk("\n    @@@ devfs_mount : 2\n");
    245 
    246234    }
    247235
     
    251239    ///// step 3 : all clusters create "internal" directory and chdevs  /////
    252240
    253 printk("\n    @@@ devfs_mount : 3 / name = %s\n", devfs_root_name );
     241    // TODO check device existence : (chdev_xp != XPTR_NULL) in chdev_dir
    254242
    255243    snprintf( node_name , 16 , "internal_%x" , local_cxy );
    256 
    257 printk("\n    @@@ devfs_mount : 4 / name = %s\n", devfs_root_name );
    258244
    259245    devfs_create_directory( node_name,
    260246                            root_inode_xp,
    261247                            &internal_inode_xp );
    262 
    263 printk("\n    @@@ devfs_mount : 5 / name = %s\n", devfs_root_name );
    264248
    265249    // create ICU chdev inode
     
    270254                          &chdev_inode_xp );
    271255
    272 printk("\n    @@@ devfs_mount : 6 / name = %s\n", devfs_root_name );
    273 
    274256    // create MMC chdev inode
    275257    chdev_ptr = (chdev_t *)GET_PTR( chdev_dir.mmc[local_cxy] );
     
    279261                          &chdev_inode_xp );
    280262
    281 printk("\n    @@@ devfs_mount : 7 / name = %s\n", devfs_root_name );
    282 
    283263    // create DMA chdev inodes (one DMA channel per core)
    284264    for( channel = 0 ; channel < cluster->cores_nr ; channel++ )
     
    290270                              (vfs_inode_t *)GET_PTR( internal_inode_xp ),
    291271                              &chdev_inode_xp );
    292 
    293 printk("\n    @@@ devfs_mount : 8 / name = %s\n", devfs_root_name );
    294 
    295272    }
    296273
    297274    ///// step 4 : cluster_io creates "external" directory and chdevs /////
     275
     276    // TODO check device existence : (chdev_xp != XPTR_NULL) in chdev_dir
    298277
    299278    if( local_cxy == cluster->io_cxy )
     
    302281                                root_inode_xp,
    303282                                &external_inode_xp );
    304 
    305 printk("\n    @@@ devfs_mount : 9 / name = %s\n", devfs_root_name );
    306283
    307284        // create IOB chdev inode
     
    312289                              &chdev_inode_xp );
    313290       
    314 printk("\n    @@@ devfs_mount : 10 / name = %s\n", devfs_root_name );
    315 
    316291        // create PIC chdev inode
    317292        chdev_ptr = (chdev_t *)GET_PTR( chdev_dir.pic );
     
    374349                                  (vfs_inode_t *)GET_PTR( external_inode_xp ),
    375350                                  &chdev_inode_xp );
    376 printk("\n    @@@ devfs_mount : 11 / name = %s\n", devfs_root_name );
    377 
    378351        }
    379352    }
     
    676649
    677650
    678 const struct vfs_file_op_s devfs_f_op =
    679 {
    680         .open    = devfs_open,
    681         .read    = devfs_read,
    682         .write   = devfs_write,
    683         .lseek   = devfs_lseek,
    684         .mmap    = devfs_mmap,
    685         .munmap  = devfs_munmap,
    686         .readdir = devfs_readdir,
    687         .close   = devfs_close,
    688         .release = devfs_release
    689 };
    690 
    691651*/
    692652
  • trunk/kernel/vfs/fatfs.c

    r23 r50  
    3636#include <fatfs.h>
    3737
     38
    3839//////////////////////////////////////////////////////////////////////////////////////////
    3940//          Extern  variables         
    4041//////////////////////////////////////////////////////////////////////////////////////////
    4142
    42 extern vfs_ctx_t        fs_context[FS_TYPES_NR];   // allocated in vfs.c file
    43 
    44 extern remote_barrier_t global_barrier;            // allocated dans kernel_init.c
     43extern vfs_ctx_t          fs_context[FS_TYPES_NR];   // allocated in vfs.c file
     44
     45extern remote_barrier_t   global_barrier;            // allocated in kernel_init.c
    4546 
    4647//////////////////////////////////////////////////////////////////////////////////////////
     
    297298                        xptr_t        root_inode_xp )
    298299{
    299     error_t  error;
    300     uint8_t  buffer[512];    // buffer for boot record
    301 
    302     // make a synchronous access to IOC device to read the boot record from device
     300    error_t     error;
     301    uint8_t   * buffer;
     302    kmem_req_t  req;
     303
     304    // allocate a 512 bytes buffer to store the boot record
     305        req.type    = KMEM_512_BYTES;
     306    req.flags   = AF_KERNEL | AF_ZERO;
     307        buffer      = (uint8_t *)kmem_alloc( &req );
     308     
     309    fatfs_dmsg("\n[INFO] %s : enters with buffer = %x\n",
     310               __FUNCTION__ , (intptr_t)buffer );
     311
     312    // load the boot record from device
     313    // using a synchronous access to IOC device 
    303314    error = dev_ioc_sync_read( buffer , 0 , 1 );
    304     assert( (error == 0) , __FUNCTION__ , "cannot access FAT boot record" );
     315
     316    assert( (error == 0) , __FUNCTION__ , "cannot access boot record" );
     317
     318#if CONFIG_FAT_DEBUG
     319    uint32_t   line;
     320    uint32_t   byte = 0;
     321    printk("\n*** boot record at cycle %d ***\n", hal_time_stamp() );
     322    for ( line = 0 ; line < 32 ; line++ )
     323    {
     324        printk(" %X | %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x |\n",
     325               byte,
     326               buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
     327               buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
     328               buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
     329               buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
     330
     331         byte += 16;
     332    }
     333#endif
    305334
    306335    // check sector size from boot record
    307336    uint32_t sector_size = get_record_from_buffer( BPB_BYTSPERSEC , buffer , 1 );
     337
    308338    assert( (sector_size == 512) , __FUNCTION__ , "sector size must be 512 bytes" );
    309339
    310340    // check cluster size from boot record
    311341    uint32_t nb_sectors = get_record_from_buffer( BPB_SECPERCLUS , buffer , 1 );
     342
    312343    assert( (nb_sectors == 8) , __FUNCTION__ , "cluster size must be 8 sectors" );
    313344
    314345    // check number of FAT copies from boot record
    315346    uint32_t nb_fats = get_record_from_buffer( BPB_NUMFATS , buffer , 1 );
     347
    316348    assert( (nb_fats == 1) , __FUNCTION__ , "number of FAT copies must be 1" );
    317349
    318350    // get & check number of sectors in FAT from boot record
    319351    uint32_t fat_sectors = get_record_from_buffer( BPB_FAT32_FATSZ32 , buffer , 1 );
     352
    320353    assert( ((fat_sectors & 0xF) == 0) , __FUNCTION__ , "FAT not multiple of 16 sectors");
    321354
    322355    // get and check root cluster from boot record
    323356    uint32_t root_cluster = get_record_from_buffer( BPB_FAT32_ROOTCLUS , buffer , 1 );
     357
    324358    assert( (root_cluster == 2) , __FUNCTION__ , "Root cluster index must be  2");
    325359
    326360    // get FAT lba from boot record
    327361    uint32_t fat_lba = get_record_from_buffer( BPB_RSVDSECCNT , buffer , 1 );
    328    
     362
     363    // release the 512 bytes buffer
     364    req.type = KMEM_512_BYTES;
     365    req.ptr  = buffer;
     366    kmem_free( &req );
     367
    329368    // allocate a mapper for the FAT itself
    330369    mapper_t * fat_mapper = mapper_create();
     370
    331371    assert( (fat_mapper != NULL) , __FUNCTION__ , "no memory for FAT mapper" );
    332372
     
    342382    fatfs_ctx->fat_mapper_xp         = XPTR( local_cxy , fat_mapper );
    343383
     384    fatfs_dmsg("\n*** FAT context ***\n"
     385               "- fat_sectors     = %d\n"
     386               "- sector size     = %d\n"
     387               "- cluster size    = %d\n"
     388               "- fat_first_lba   = %d\n"
     389               "- data_first_lba  = %d\n"
     390               "- mapper          = %l\n",
     391               fatfs_ctx->fat_sectors_count,
     392               fatfs_ctx->bytes_per_sector,
     393               fatfs_ctx->bytes_per_cluster,
     394               fatfs_ctx->fat_begin_lba,
     395               fatfs_ctx->cluster_begin_lba,
     396               fatfs_ctx->fat_mapper_xp );
     397
    344398    // initialize the VFS context
    345399    vfs_ctx->type    = FS_TYPE_FATFS;
  • trunk/kernel/vfs/vfs.c

    r23 r50  
    4949
    5050//////////////////////////////////////////////////////////////////////////////////////////
    51 //           Global variables         
    52 //////////////////////////////////////////////////////////////////////////////////////////
    53 
    54 // array of supported FS contexts
    55 vfs_ctx_t   fs_context[FS_TYPES_NR];
    56 
     51//           Extern variables         
     52//////////////////////////////////////////////////////////////////////////////////////////
     53
     54extern vfs_ctx_t   fs_context[FS_TYPES_NR];    // allocated in kernel_init.c
     55 
    5756//////////////////////////////////////////////////////////////////////////////////////////
    5857//           Context related functions
     
    297296        kmem_req_t       req;        // request to kernel memory allocator
    298297
    299 printk("\n            @@@ dentry_create : 0 / name = %s\n", name );
    300 
    301298    // check type and get pointer on context
    302299    if     ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
     
    319316    }
    320317
    321 printk("\n            @@@ dentry_create : 1 / name = %s\n", name );
    322 
    323318    // allocate memory for dentry descriptor
    324319        req.type  = KMEM_VFS_DENTRY;
     
    340335    strcpy( dentry->name , name );
    341336
    342 printk("\n            @@@ dentry_create : 2 / name = %s\n", name );
    343 
    344337    // register dentry in hash table rooted in parent inode
    345338    xhtab_insert( XPTR( local_cxy , &parent->children ),
    346339                  name,
    347340                  XPTR( local_cxy , &dentry->xlist ) );
    348 
    349 printk("\n            @@@ dentry_create : 3 / name = %s\n", name );
    350341
    351342    // return extended pointer on dentry
Note: See TracChangeset for help on using the changeset viewer.