Changeset 686


Timestamp: Jan 13, 2021, 12:47:53 AM
Author: alain
Message: cosmetic
Location: trunk/hal
Files: 16 edited

  • trunk/hal/generic/hal_gpt.h

    r640 r686  
    22 * hal_gpt.h - Generic Page Table API definition.
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737//   defined as a 32 bits-vector.
    3838//
    39 // Any arch-specific implementation must implement this API.
     39// Any architecture-specific implementation must implement this API.
    4040/////////////////////////////////////////////////////////////////////////////////////////
    4141
  • trunk/hal/generic/hal_special.h

    r679 r686  
    22 * hal_special.h - Generic Special Registers Access API definition.
    33 *
    4  * Authors   Alain Greiner    (2016,2017)
     4 * Authors   Alain Greiner        (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    4848 * This function initializes - for architectures requiring it - the MMU registers of the
    4949 * calling core to use the the kernel page table identified by the <gpt> argument for
    50  * all threads attached to kernel process_zero.
     50 * all kernel threads attached to kernel process_zero.
    5151 * It is called by all cores in the kernel_init() function.
    5252 *****************************************************************************************
  • trunk/hal/tsar_mips32/core/hal_context.c

    r679 r686  
    22 * hal_context.c - implementation of Thread Context API for TSAR-MIPS32
    33 *
    4  * Author  Alain Greiner    (2016,2017,2018,2019)
      4 * Author  Alain Greiner    (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    121121{
    122122
    123 assert( __FUNCTION__, (sizeof(hal_cpu_context_t) <= CONFIG_CPU_CTX_SIZE), "illegal CPU context size" );
     123assert( __FUNCTION__, (sizeof(hal_cpu_context_t) <= CONFIG_CPU_CTX_SIZE),
     124"illegal CPU context size" );
    124125
    125126    // allocate memory for cpu_context
    126     kmem_req_t  req;
    127     req.type   = KMEM_KCM;
    128     req.order  = bits_log2( sizeof(hal_cpu_context_t) );
    129     req.flags  = AF_KERNEL | AF_ZERO;
    130 
    131     hal_cpu_context_t * context = kmem_alloc( &req );
    132 
     127    hal_cpu_context_t * context = kmem_alloc( bits_log2( sizeof(hal_cpu_context_t) ),
     128                                              AF_KERNEL | AF_ZERO);
    133129    if( context == NULL ) return -1;
    134130
     
    152148    hal_cpu_context_t * context = (hal_cpu_context_t *)thread->cpu_context;
    153149
    154 assert( __FUNCTION__, (context != NULL ), "CPU context not allocated" );
     150assert( __FUNCTION__, (context != NULL ),
     151"CPU context not allocated" );
    155152
    156153    // compute the PPN for the GPT PT1
     
    405402void hal_cpu_context_destroy( thread_t * thread )
    406403{
    407     kmem_req_t          req;
    408 
     404    // get pointer on CPU context
    409405    hal_cpu_context_t * ctx = thread->cpu_context;
    410406
    411407    // release CPU context if required
    412     if( ctx != NULL )
    413     {   
    414         req.type = KMEM_KCM;
    415         req.ptr  = ctx;
    416         kmem_free( &req );
    417     }
     408    if( ctx != NULL )  kmem_free( ctx , bits_log2( sizeof(hal_cpu_context_t)) );
    418409
    419410}  // end hal_cpu_context_destroy()
     
    434425
    435426    // allocate memory for fpu_context
    436     kmem_req_t  req;
    437     req.type   = KMEM_KCM;
    438     req.flags  = AF_KERNEL | AF_ZERO;
    439     req.order  = bits_log2( sizeof(hal_fpu_context_t) );
    440 
    441     hal_fpu_context_t * context = kmem_alloc( &req );
     427    hal_fpu_context_t * context = kmem_alloc( bits_log2( sizeof(hal_fpu_context_t) ),
     428                                              AF_KERNEL | AF_ZERO );
    442429
    443430    if( context == NULL ) return -1;
     
    454441    hal_fpu_context_t * context = thread->fpu_context;
    455442
    456     assert( __FUNCTION__, (context != NULL) , "fpu context not allocated" );
     443assert( __FUNCTION__, (context != NULL) ,
     444"fpu context not allocated" );
    457445
    458446    memset( context , 0 , sizeof(hal_fpu_context_t) );
     
    478466void hal_fpu_context_destroy( thread_t * thread )
    479467{
    480     kmem_req_t  req;
    481 
    482     hal_fpu_context_t * context = thread->fpu_context;
     468    // get pointer on FPU context
     469    hal_fpu_context_t * ctx = thread->fpu_context;
    483470
    484471    // release FPU context if required
    485     if( context != NULL )
    486     {   
    487         req.type = KMEM_KCM;
    488         req.ptr  = context;
    489         kmem_free( &req );
    490     }
     472    if( ctx != NULL ) kmem_free( ctx , bits_log2( sizeof(hal_fpu_context_t)) );
    491473
    492474}  // end hal_fpu_context_destroy()
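The hal_context.c hunks above all illustrate the same kernel memory allocator change: the kmem_req_t request descriptor disappears, and kmem_alloc() / kmem_free() now take the allocation order (log2 of the size in bytes) and the AF_* flags directly. A minimal sketch of the new calling convention, using only the calls and flags visible in this changeset:

    // allocate a zeroed CPU context : order = log2(size), flags = AF_KERNEL | AF_ZERO
    hal_cpu_context_t * ctx = kmem_alloc( bits_log2( sizeof(hal_cpu_context_t) ),
                                          AF_KERNEL | AF_ZERO );
    if( ctx == NULL ) return -1;

    // release it later : the same order must be passed back to kmem_free()
    kmem_free( ctx , bits_log2( sizeof(hal_cpu_context_t) ) );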
  • trunk/hal/tsar_mips32/core/hal_drivers.c

    r679 r686  
    22 * hal_drivers.c - Driver initializers for TSAR
    33 *
    4  * Copyright (c) 2017 Maxime Villard
     4 * Author        Maxime Villard (2017)
     5 *
     6 * Copyright (c) UPMC Sorbonne Universites
    57 *
    68 * This file is part of ALMOS-MKH.
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r635 r686  
    228228            // try to map the unmapped PTE
    229229            error = vmm_handle_page_fault( process,
    230                                            bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
     230                                           bad_vaddr >> CONFIG_PPM_PAGE_ORDER );
    231231
    232232            if( error == EXCP_NON_FATAL )            // page-fault successfully handled
     
    278278            // try to handle a possible COW
    279279            error = vmm_handle_cow( process,
    280                                     bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
     280                                    bad_vaddr >> CONFIG_PPM_PAGE_ORDER );
    281281
    282282            if( error == EXCP_NON_FATAL )        // COW successfully handled
     
    358358    remote_busylock_acquire( lock_xp );
    359359
    360     nolock_printk("\n=== thread(%x,%x) / core[%d] / cycle %d ===\n",
    361     process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
     360    nolock_printk("\n=== thread(%x,%x) / core[%x,%d] / cycle %d ===\n",
     361    process->pid, this->trdid, process->pid, core->lid, (uint32_t)hal_get_cycles() );
    362362
    363363        nolock_printk("busylocks = %d / blocked_vector = %X / flags = %X\n\n",
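Both hunks above compute the virtual page number of the faulting address by shifting out the page offset; only the configuration symbol is renamed (CONFIG_PPM_PAGE_SHIFT becomes CONFIG_PPM_PAGE_ORDER). A one-line illustration, assuming the 4 Kbytes TSAR page size (order 12) checked elsewhere in this changeset:

    // with CONFIG_PPM_PAGE_ORDER == 12 : vpn = bad_vaddr / 4096
    vpn_t vpn = bad_vaddr >> CONFIG_PPM_PAGE_ORDER;   // e.g. 0x00403A10 >> 12 == 0x403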
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r679 r686  
    22 * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
    33 *
    4  * Author   Alain Greiner (2016,2017,2018,2019)
     4 * Author   Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737
    3838////////////////////////////////////////////////////////////////////////////////////////
     39// The Page Table for the TSAR-MIPS32 MMU is defined as a two levels radix tree.
     40//
     41// It defines two page sizes : 4 Kbytes pages, and 2 Mbytes pages.
     42// The virtual address space size is 4 Gbytes (32 bits virtual addresses).
     43// The physical address space is limited to 1 Tbytes (40 bits physical addresses).
     44// - For a 4 Kbytes page, the VPN uses 20 bits, and the PPN requires 28 bits.
      45// - For a 2 Mbytes page, the VPN uses 11 bits, and the PPN requires 19 bits.
     46//
     47// The first level array (PT1) contains 2048 entries, each entry contains 4 bytes,
      48// and this array is aligned on an 8K bytes boundary.
     49//
     50// The second level array (PT2) contains 512 entries, each entry contains 8 bytes,
      51// and this array is aligned on a 4K bytes boundary.
     52////////////////////////////////////////////////////////////////////////////////////////
     53
     54
     55////////////////////////////////////////////////////////////////////////////////////////
    3956// This define the masks for the TSAR MMU PTE attributes (from TSAR MMU specification)
    4057////////////////////////////////////////////////////////////////////////////////////////
     
    152169
    153170// check page size
    154 assert( __FUNCTION__, (CONFIG_PPM_PAGE_SIZE == 4096) , "the TSAR page size must be 4 Kbytes\n" );
    155 
    156     // allocates 2 physical pages for PT1
    157         kmem_req_t req;
    158         req.type  = KMEM_PPM;
    159         req.order = 1;                     // 2 small pages
    160         req.flags = AF_KERNEL | AF_ZERO;
    161         base = kmem_alloc( &req );
     171assert( __FUNCTION__, (CONFIG_PPM_PAGE_SIZE == 4096) ,
     172"the TSAR page size must be 4 Kbytes\n" );
     173
     174    // allocates 8 Kbytes for PT1
     175        base = kmem_alloc( 13 , AF_ZERO );
    162176
    163177        if( base == NULL )
     
    197211    uint32_t   * pt2;
    198212    uint32_t     attr;
    199         kmem_req_t   req;
    200213
    201214    thread_t * this = CURRENT_THREAD;
     
    241254                }
    242255
    243                 // release the page allocated for the PT2
    244                 req.type = KMEM_PPM;
    245                 req.ptr  = pt2;
    246                 kmem_free( &req );
     256                // release the 4K bytes allocated for the PT2
     257                kmem_free( pt2 , 12 );
    247258            }
    248259        }
    249260        }
    250261
    251     // release the PT1
    252     req.type = KMEM_PPM;
    253     req.ptr  = pt1;
    254     kmem_free( &req );
     262    // release the 8K bytes allocated for PT1
     263    kmem_free( pt1 , 13 );
    255264
    256265#if DEBUG_HAL_GPT_DESTROY
     
    272281    xptr_t              pte1_xp;         // extended pointer on PT1[x1] entry
    273282        uint32_t            pte1;            // value of PT1[x1] entry
    274 
    275     kmem_req_t          req;             // kmem request fro PT2 allocation
    276 
    277283    uint32_t          * pt2;             // local pointer on PT2 base
    278284        ppn_t               pt2_ppn;         // PPN of page containing PT2
     
    334340            hal_disable_irq( &sr_save );
    335341
    336             req.type  = KMEM_PPM;
    337             req.order = 0; 
    338             req.flags = AF_ZERO | AF_KERNEL;
    339             pt2       = kmem_remote_alloc( gpt_cxy , &req );
     342            // allocate a 4K bytes PT2
     343            pt2       = kmem_remote_alloc( gpt_cxy , 12 , AF_ZERO );
    340344
    341345            if( pt2 == NULL )
     
    863867    uint32_t   * dst_pt2;   // local pointer on DST PT2
    864868
    865         kmem_req_t   req;       // for PT2 allocation
    866 
    867869    uint32_t     src_pte1;
    868870    uint32_t     dst_pte1;
     
    917919        if( (dst_pte1 & TSAR_PTE_MAPPED) == 0 )
    918920        {
    919             // allocate one physical page for a new PT2
    920                 req.type  = KMEM_PPM;
    921                 req.order = 0;                     // 1 small page
    922                 req.flags = AF_KERNEL | AF_ZERO;
    923                 dst_pt2   = kmem_alloc( &req );
     921            // allocate one 4K bytes physical page for a new PT2
     922                dst_pt2   = kmem_alloc( 12 , AF_ZERO );
    924923
    925924            if( dst_pt2 == NULL )
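The literal orders 13 and 12 passed to the kmem calls above follow directly from the page table geometry described in the new header comment: PT1 holds 2048 entries of 4 bytes (8 Kbytes, aligned on an 8 Kbytes boundary), and each PT2 holds 512 entries of 8 bytes (4 Kbytes, aligned on a 4 Kbytes boundary). A small sketch making that arithmetic explicit; the two size macros are illustrative names, not identifiers from the source:

    // PT1 : 2048 entries * 4 bytes = 8192 bytes = 2^13  => allocation order 13
    // PT2 :  512 entries * 8 bytes = 4096 bytes = 2^12  => allocation order 12
    #define TSAR_PT1_BYTES   (2048 * 4)    /* illustrative macro */
    #define TSAR_PT2_BYTES   ( 512 * 8)    /* illustrative macro */

    uint32_t * pt1 = kmem_alloc( 13 , AF_ZERO );                    /* local 8 Kbytes PT1  */
    uint32_t * pt2 = kmem_remote_alloc( gpt_cxy , 12 , AF_ZERO );   /* remote 4 Kbytes PT2 */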
  • trunk/hal/tsar_mips32/core/hal_ppm.c

    r632 r686  
    7979
    8080        // compute number of pages required to store page descriptor array
    81         uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;
     81        uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_ORDER;
    8282
    8383        // compute total number of reserved pages (kernel code & pages_tbl[])
     
    9090        ppm->vaddr_base = NULL;
    9191        ppm->pages_tbl  = (page_t*)( ppm->vaddr_base +
    92                                      (pages_tbl_offset << CONFIG_PPM_PAGE_SHIFT) );
     92                                     (pages_tbl_offset << CONFIG_PPM_PAGE_ORDER) );
    9393
    9494        // initialize all page descriptors in pages_tbl[]
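Here again the shift by CONFIG_PPM_PAGE_ORDER converts a byte count into a page count. A short worked example, assuming 4 Kbytes pages (order 12) and an assumed 64-byte page_t descriptor:

    // example : 2048 descriptors * 64 bytes (assumed sizeof(page_t)) = 128 Kbytes
    // 128 Kbytes >> 12 = 32 pages needed to store the pages_tbl[] array
    uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_ORDER;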
  • trunk/hal/tsar_mips32/core/hal_special.c

    r658 r686  
    22 * hal_special.c - implementation of Generic Special Register Access API for TSAR-MIPS32
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5555// For the TSAR architecture, this function register the physical address of
    5656// the first level page table (PT1) in the PTPR register.
    57 // It activates the intructions MMU, and de-activates the data MMU.
      57// It activates the instruction MMU, and de-activates the data MMU, which is NOT
     58// used by the kernel for 32 bits architectures.
    5859/////////////////////////////////////////////////////////////////////////////////
    5960void hal_mmu_init( gpt_t * gpt )
  • trunk/hal/tsar_mips32/core/hal_uspace.c

    r679 r686  
    4949    uint32_t cxy = (uint32_t)GET_CXY( k_dst_xp );
    5050 
    51 assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
     51assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
     52"must be called by an user thread" );
    5253
    5354#if DEBUG_HAL_USPACE
     
    147148    uint32_t cxy = (uint32_t)GET_CXY( k_src_xp );
    148149
    149 assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
     150assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
     151"must be called by an user thread" );
    150152
    151153#if DEBUG_HAL_USPACE
     
    236238    uint32_t cxy = (uint32_t)GET_CXY( k_dst_xp );
    237239
    238 assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
     240assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
     241"must be called by an user thread" );
    239242
    240243    hal_disable_irq( &save_sr );
     
    291294    uint32_t cxy = (uint32_t)GET_CXY( k_src_xp );
    292295
    293 assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
     296assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
     297"must be called by an user thread" );
    294298
    295299    hal_disable_irq( &save_sr );
     
    343347    uint32_t str   = (uint32_t)u_str;
    344348
    345 assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0), "must be called by an user thread" );
     349assert( __FUNCTION__, (CURRENT_THREAD->process->pid > 0),
     350"must be called by an user thread" );
    346351
    347352    hal_disable_irq( &save_sr );
     
    352357        "mfc2   $15,   $1           \n"   /* $15 <= MMU_MODE (DTLB off)     */
    353358        "ori    $14,   $15,  0x4    \n"   /* $14 <= mode DTLB on            */
     359        "mtc2   $14,   $1                       \n"   /* set DTLB on                    */
    354360        "1:                         \n"
    355         "mtc2   $14,   $1                       \n"   /* set DTLB on                    */
    356361        "lb         $12,   0($13)       \n"   /* $12 <= one byte from u_space   */
     362        "beq    $12,   $0,   2f     \n"   /* exit loop when NUL found       */
     363        "addi   $13,   $13,  1      \n"   /* increment address              */
     364        "j                   1b     \n"   /* jump to next iteration         */
     365        "addi   %0,    %0,   1      \n"   /* increment count if not NUL     */
     366        "2:                         \n"
    357367        "mtc2   $15,   $1                       \n"   /* set DTLB off                   */
    358         "addi   $13,   $13,  1      \n"   /* increment address              */
    359         "bne    $12,   $0,   1b     \n"   /* loop until NUL found           */
    360         "addi   %0,    %0,   1      \n"   /* increment count                */
    361368        ".set reorder               \n"
    362369        : "+r"(count)
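The reorganised assembly above turns the data TLB on once before the scan loop and off once after the terminating NUL is found, instead of re-enabling it at every iteration as the old code did. A C-level sketch of the same loop; dtlb_on() and dtlb_off() are illustrative stand-ins for the two mtc2 accesses to the MMU_MODE register, not HAL functions:

    uint32_t count = 0;
    char   * ptr   = (char *)u_str;

    dtlb_on();                 /* "mtc2 $14,$1" : data MMU on for user-space accesses */
    while( *ptr != 0 )         /* "lb / beq ... 2f" : stop on the NUL byte            */
    {
        ptr++;                 /* "addi $13,$13,1" : next byte                        */
        count++;               /* "addi %0,%0,1"   : one more character counted       */
    }
    dtlb_off();                /* "mtc2 $15,$1" : data MMU off                        */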
  • trunk/hal/tsar_mips32/drivers/soclib_nic.c

    r679 r686  
    11/*
    2  * soclib_nic.c - SOCLIB_NIC (Network Interface Controler) driver implementation.
     2 * soclib_nic.c - VCI_MASTER_NIC (Network Interface Controler) driver implementation.
    33 *
    44 * Author     Alain Greiner (2016,2017,2018,2019,2020)
     
    6464    remote_busylock_acquire( lock_xp );
    6565
    66     nolock_printk("\n***** chbuf %s : ptr %x / wid %d / rid %d *****\n",
    67     name, chbuf, chbuf->wid, chbuf->rid );
    68 
    69     for( i = 0 ; i < SOCLIB_NIC_CHBUF_DEPTH ; i++ )
     66    nolock_printk("\n***** chbuf %s : cxy %x / ptr %x / wid %d / rid %d *****\n",
     67    name, local_cxy , chbuf, chbuf->wid, chbuf->rid );
     68
     69    for( i = 0 ; i < CONFIG_SOCK_QUEUES_DEPTH ; i++ )
    7070    {
    7171        uint32_t * container = chbuf->cont_ptr[i];
     
    7676        if( container[511] )
    7777        {
    78             nolock_printk(" - %d : FULL  / cont_ptr %x / cont_pad [%x,%x] / plen %d\n",
    79             i, chbuf->cont_ptr[i],
    80             (uint32_t)(chbuf->cont_pad[i]>>32),
    81             (uint32_t)chbuf->cont_pad[i],
    82             container[510] );
     78            nolock_printk(" - %d : FULL  / cont_ptr %x / plen %d\n",
     79            i, chbuf->cont_ptr[i], container[510] );
    8380        }
    8481        else
    8582        {
    86             nolock_printk(" - %d : EMPTY / cont_ptr %x / cont_pad [%x,%x]\n",
    87             i, chbuf->cont_ptr[i],
    88             (uint32_t)(chbuf->cont_pad[i]>>32),
    89             (uint32_t)chbuf->cont_pad[i] );
     83            nolock_printk(" - %d : EMPTY / cont_ptr %x\n",
     84            i, chbuf->cont_ptr[i] );
    9085        }
    9186    }
     
    10196void soclib_nic_init( chdev_t * chdev )
    10297{
    103     uint32_t    i;
    104     kmem_req_t  req;
    105     ppn_t       ppn;
    106     uint64_t    padr;
     98    uint32_t * container;       // local pointer on one container
     99    uint32_t   cont_per_page;   // number of containers per page
     100    uint32_t   cont_gid;        // container global index (in chbuf)
     101    bool_t     cont_error;      // not enough memory for chbuf containers
     102
     103    ppn_t      ppn;             // used for both the chbuf descriptor and the containers
     104    uint64_t   padr;            // used for both the chbuf descriptor and the containers
     105
     106assert( __FUNCTION__ , (chdev->func == DEV_FUNC_NIC),
     107"bad func argument" );
     108
     109assert( __FUNCTION__ , (sizeof(nic_cont_t) == 2048),
     110"container size must be 2048 bytes" );
     111
     112assert( __FUNCTION__ , (CONFIG_PPM_PAGE_ORDER >= 11 ),
     113"page size cannot be smaller than container size" );
    107114
    108115    // set driver specific fields in chdev descriptor
     
    122129uint32_t   cycle = (uint32_t)hal_get_cycles();
    123130if( (is_rx == false) && DEBUG_HAL_NIC_RX < cycle )
    124 printk("\n[%s] thread[%x,%x] enter : NIC_TX channel %d / chdev %x / base %x / cycle %d\n",
    125 __FUNCTION__, this->process->pid, this->trdid, channel, chdev, nic_ptr, cycle );
     131printk("\n[%s] thread[%x,%x] enter : NIC_TX channel %d / chdev %x / cycle %d\n",
     132__FUNCTION__, this->process->pid, this->trdid, channel, chdev, cycle );
    126133if( is_rx && DEBUG_HAL_NIC_RX < cycle )
    127 printk("\n[%s] thread[%x,%x] enter : NIC_RX channel %d / chdev %x / base %x / cycle %d\n",
    128 __FUNCTION__, this->process->pid, this->trdid, channel, chdev, nic_ptr, cycle );
    129 #endif
    130 
    131     // get number of channels from hardware
     134printk("\n[%s] thread[%x,%x] enter : NIC_RX channel %d / chdev %x / cycle %d\n",
     135__FUNCTION__, this->process->pid, this->trdid, channel, chdev, cycle );
     136#endif
     137
     138    // get number of channels from NIC hardware register
    132139    uint32_t channels = hal_remote_l32( XPTR( nic_cxy,
    133140                        nic_ptr + NIC_GLOBAL_OFFSET + NIC_G_CHANNELS ));
     
    144151    if( channel >= channels )
    145152    {
    146         printk("\n[PANIC] in %s illegal channel index\n", __FUNCTION__ );
     153        printk("\n[ERROR] in %s illegal channel index\n", __FUNCTION__ );
    147154        return;
    148155    }
    149156
    150157    // allocate memory for chbuf descriptor
    151     req.type   = KMEM_KCM;
    152     req.order  = bits_log2( sizeof(nic_chbuf_t) );
    153     req.flags  = AF_KERNEL;
    154     nic_chbuf_t * chbuf = kmem_alloc( &req );
     158    nic_chbuf_t * chbuf = kmem_alloc( bits_log2( sizeof(nic_chbuf_t) ) , AF_KERNEL );
    155159
    156160    if( chbuf == NULL )
    157161    {
    158         printk("\n[PANIC] in %s : cannot allocate chbuf descriptor\n", __FUNCTION__ );
     162        printk("\n[ERROR] in %s : cannot allocate chbuf descriptor\n", __FUNCTION__ );
    159163        return;
    160164    }
     
    166170    // software L2/L3 cache coherence for chbuf WID & RID     
    167171    if( chdev_dir.iob ) dev_mmc_sync( XPTR( local_cxy , chbuf ) , 8 );
    168    
    169     // allocate containers and complete chbuf initialisation
    170     for( i = 0 ; i < SOCLIB_NIC_CHBUF_DEPTH ; i++ )
    171     {
    172         // 2048 bytes per container
    173         req.type   = KMEM_KCM;
    174         req.order  = 11;
    175         req.flags  = AF_KERNEL;
    176         uint32_t * container  = kmem_alloc( &req );
    177 
    178         if( container == NULL )
    179         {
    180             printk("\n[PANIC] in %s : cannot allocate container\n", __FUNCTION__ );
    181             return;
     172
     173    cont_error    = false;
     174    cont_gid      = 0;
     175    cont_per_page = 1 << (CONFIG_PPM_PAGE_ORDER - 11);
     176
     177    // allocate containers & complete chbuf initialisation
     178    // depending on the PPM page size, we pack several
      179    // 2048 bytes containers in one single page.
     180
      181    // loop on containers
     182    while( cont_gid < CONFIG_SOCK_QUEUES_DEPTH ) 
     183    {
     184        if( (cont_gid & (cont_per_page - 1)) == 0 )  // allocate one PPM page
     185        {
     186            container = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_KERNEL );
     187
     188            if( container == NULL )
     189            {
     190                cont_error = true;
     191                break;
     192            }
     193        }
     194        else           // increment container base address
     195        {
     196            container = container + 512;
    182197        }
    183198
     
    190205        // compute container physical address
    191206        ppn  = ppm_base2ppn( XPTR( local_cxy , container ) );
    192         padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_SHIFT) |
     207        padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_ORDER) |
    193208               ((intptr_t)container & CONFIG_PPM_PAGE_MASK);
    194209
    195210        // complete chbuf initialisation       
    196         chbuf->cont_ptr[i] = container;
    197         chbuf->cont_pad[i] = padr;
     211        chbuf->cont_ptr[cont_gid] = container;
     212        chbuf->cont_pad[cont_gid] = padr;
     213
     214        // increment container index
     215        cont_gid++;
     216    }
     217
     218    // release allocated containers and chbuf if not enough memory
     219    if( cont_error )
     220    {
     221        // loop on allocated containers
     222        while( cont_gid )
     223        {
     224            // release container when required
     225            if( (cont_gid & (cont_per_page - 1)) == 0 )
     226            kmem_free( chbuf->cont_ptr[cont_gid] , CONFIG_PPM_PAGE_ORDER );
     227
     228            // decrement container index
     229            cont_gid--;
     230        }
     231
     232        // release chbuf descriptor
     233        kmem_free( chbuf , bits_log2(sizeof(nic_chbuf_t)) );
     234
     235        return;
    198236    }
    199237
     
    204242    // get NIC channel segment base and chbuf depth
    205243    uint32_t * channel_base = nic_ptr + NIC_CHANNEL_SPAN * channel;
    206     uint32_t   nbufs        = SOCLIB_NIC_CHBUF_DEPTH;
     244    uint32_t   nbufs        = CONFIG_SOCK_QUEUES_DEPTH;
    207245   
    208246    // compute chbuf physical address
    209247    ppn  = ppm_base2ppn( XPTR( local_cxy , chbuf ) );
    210     padr = ((uint64_t)ppn  << CONFIG_PPM_PAGE_SHIFT) |
     248    padr = ((uint64_t)ppn  << CONFIG_PPM_PAGE_ORDER) |
    211249           ((intptr_t)chbuf & CONFIG_PPM_PAGE_MASK);
    212250
     
    267305    thread_t * this = CURRENT_THREAD;
    268306
    269 // check calling thread == client thread
    270 assert( __FUNCTION__, (thread_xp == XPTR( local_cxy , this )), "calling thread must be the client thread");
     307assert( __FUNCTION__, (thread_xp == XPTR( local_cxy , this )),
     308"calling thread must be the client thread");
    271309 
    272310    // get command type
     
    286324
    287325// check chdev is local
    288 assert( __FUNCTION__, (dev_cxy == local_cxy), "illegal cluster for a WRITE command");
     326assert( __FUNCTION__, (dev_cxy == local_cxy),
     327"illegal cluster for a WRITE command");
    289328           
    290329            // get command arguments
     
    293332
    294333// check packet length
    295 assert( __FUNCTION__, (length <= 2040), "packet length too large");
     334assert( __FUNCTION__, (length <= 2040),
     335"packet length too large");
    296336
    297337            // get chbuf descriptor pointer
     
    313353uint32_t   cycle = (uint32_t)hal_get_cycles();
    314354if( DEBUG_HAL_NIC_TX < cycle )
    315 printk("\n[%s] thread[%x,%x] enter / WRITE / chdev %x / chbuf %x / len %d / cycle %d\n",
    316 __FUNCTION__, this->process->pid, this->trdid, dev_ptr, chbuf, length, cycle );
    317 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
     355printk("\n[%s] thread[%x,%x] enter / WRITE / %s / chbuf (%x,%x) / len %d / cycle %d\n",
     356__FUNCTION__, this->process->pid, this->trdid, dev_ptr->name, local_cxy, chbuf, length, cycle );
    318357#endif
    319358            // check container STS
     
    327366cycle = (uint32_t)hal_get_cycles();
    328367if( DEBUG_HAL_NIC_TX < cycle )
    329 printk("\n[%s] thread[%x,%x] WRITE failure : NIC_TX[%d] queue full / cycle %d\n",
     368printk("\n[%s] thread[%x,%x] exit / WRITE failure : NIC_TX[%d] queue full / cycle %d\n",
    330369__FUNCTION__, this->process->pid , this->trdid , dev_ptr->channel , cycle );
    331 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
    332370#endif
    333371            }
     
    346384
    347385                // update current container WID
    348                 chbuf->wid = (index + 1) % SOCLIB_NIC_CHBUF_DEPTH;
     386                chbuf->wid = (index + 1) % CONFIG_SOCK_QUEUES_DEPTH;
    349387
    350388                // software L2/L3 cache coherence for container DATA write
     
    364402cycle = (uint32_t)hal_get_cycles();
    365403if( DEBUG_HAL_NIC_TX < cycle )
    366 printk("\n[%s] thread[%x,%x] WRITE success on NIC_TX[%d] / len %d / cycle %d\n",
     404printk("\n[%s] thread[%x,%x] exit / WRITE success on NIC_TX[%d] / len %d / cycle %d\n",
    367405__FUNCTION__, this->process->pid, this->trdid, dev_ptr->channel , length, cycle );
    368 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
     406if((DEBUG_HAL_NIC_TX < cycle) && (DEBUG_HAL_NIC_TX & 1))
     407putb( "64 first bytes moved to TX queue by NIC driver" , buffer , 64 );
    369408#endif
    370409            }
     
    377416
    378417// check chdev is local
    379 assert( __FUNCTION__, (dev_cxy == local_cxy), "illegal cluster for a READ command");
     418assert( __FUNCTION__, (dev_cxy == local_cxy),
     419"illegal cluster for a READ command");
    380420           
    381421            // get target buffer
     
    400440uint32_t   cycle = (uint32_t)hal_get_cycles();
    401441if( DEBUG_HAL_NIC_RX < cycle )
    402 printk("\n[%s] thread[%x,%x] enter / READ / chdev %x / chbuf %x / cycle %d\n",
    403 __FUNCTION__, this->process->pid, this->trdid, dev_ptr, chbuf, cycle );
    404 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
     442printk("\n[%s] thread[%x,%x] enter / READ / %s / chbuf (%x,%x) / cycle %d\n",
     443__FUNCTION__, this->process->pid, this->trdid, dev_ptr->name, local_cxy, chbuf, cycle );
    405444#endif
    406445            // check container state
     
    414453cycle = (uint32_t)hal_get_cycles();
    415454if( DEBUG_HAL_NIC_RX < cycle )
    416 printk("\n[%s] thread[%x,%x] READ failure : NIC_RX[%d] queue empty / cycle %d\n",
     455printk("\n[%s] thread[%x,%x] exit / READ failure : NIC_RX[%d] queue empty / cycle %d\n",
    417456__FUNCTION__, this->process->pid, this->trdid, dev_ptr->channel , cycle );
    418 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
    419457#endif
    420458            }
     
    436474
    437475                // update current container WID
    438                 chbuf->rid = (index + 1) % SOCLIB_NIC_CHBUF_DEPTH;
     476                chbuf->rid = (index + 1) % CONFIG_SOCK_QUEUES_DEPTH;
    439477
    440478                // software L2/L3 cache coherence for container STS write
     
    451489uint32_t   cycle = (uint32_t)hal_get_cycles();
    452490if( DEBUG_HAL_NIC_RX < cycle )
    453 printk("\n[%s] thread[%x,%x] READ success on NIC_RX[%d] queue / len %d / cycle %d\n",
     491printk("\n[%s] thread[%x,%x] exit / READ success on NIC_RX[%d] queue / len %d / cycle %d\n",
    454492__FUNCTION__, this->process->pid, this->trdid , dev_ptr->channel , length , cycle );
    455 soclib_nic_chbuf_display( chbuf , dev_ptr->name );
     493if((DEBUG_HAL_NIC_RX < cycle) && (DEBUG_HAL_NIC_RX & 1))
     494putb("64 first bytes moved from RX queue by NIC driver" , buffer , 64 );
    456495#endif
    457496            }
     
    491530            cxy_t      base_cxy = GET_CXY( base_xp );
    492531
    493             // get channel and run from the "length" and "status" arguments
     532            // get "channel" and "run" arguments from the "length" and "status" arguments
    494533            uint32_t channel = this->nic_cmd.length;
    495534            uint32_t run     = this->nic_cmd.status;
     
    613652void __attribute__ ((noinline)) soclib_nic_isr( chdev_t * chdev )
    614653{
    615     // get base, size, channel, is_rx from NIC channel device NIC
     654    // get base, size, channel, is_rx, name, and server from NIC chdev
    616655    xptr_t     base_xp = chdev->base;
    617656    uint32_t   channel = chdev->channel;
    618657    bool_t     is_rx   = chdev->is_rx;
     658    thread_t * server  = chdev->server;
    619659
    620660    // get NIC peripheral cluster and local pointer
     
    630670    uint32_t status = hal_remote_l32( XPTR( nic_cxy , ptr ) );
    631671
    632 // check status value
    633 if( is_rx &&  (status != NIC_CHANNEL_STATUS_IDLE) )
    634 printk("\n[PANIC] in %s : error reported by NIC_RX[%d]\n", __FUNCTION__, channel );
    635 if( (is_rx == false) &&  (status != NIC_CHANNEL_STATUS_IDLE) )
    636 printk("\n[PANIC] in %s : error reported by NIC_TX[%d]\n", __FUNCTION__, channel );
    637 
    638     // unblock server thread
    639     thread_t * server = chdev->server;
    640     thread_unblock( XPTR( local_cxy , server ) , THREAD_BLOCKED_ISR );
     672    // check status value
     673    if( status == NIC_CHANNEL_STATUS_ERROR )    // error reported
     674    {
    641675
    642676#if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
    643677uint32_t   cycle = (uint32_t)hal_get_cycles();
    644 if( is_rx && DEBUG_HAL_NIC_RX < cycle )
    645 printk("\n[%s] ISR unblocks NIC_RX[%d] server thread / cycle %d\n",
    646 __FUNCTION__, channel, cycle );
    647 if( (is_rx == false) && DEBUG_HAL_NIC_TX < cycle )
    648 printk("\n[%s] ISR unblocks NIC_TX[%d] server thread / cycle %d\n",
    649 __FUNCTION__, channel, cycle );
    650 #endif
     678printk("\n[%s] error reported for %s / status %d / cycle %d\n",
     679 __FUNCTION__ , chdev->name , status , cycle );
     680#endif
     681        server->nic_cmd.error  = 1;
     682    }
     683    else if( status != NIC_CHANNEL_STATUS_IDLE)   // no error but DMA BUSY
     684    {
     685
     686#if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
     687uint32_t   cycle = (uint32_t)hal_get_cycles();
     688printk("\n[%s] warning reported for %s / status %d / cycle %d\n",
     689 __FUNCTION__ , chdev->name , status , cycle );
     690#endif
     691        server->nic_cmd.error  = 0;
     692    }
     693    else
     694    {
     695
     696#if (DEBUG_HAL_NIC_RX || DEBUG_HAL_NIC_TX)
     697uint32_t   cycle = (uint32_t)hal_get_cycles();
     698printk("\n[%s] irq reported for %s / status %d / cycle %d\n",
     699 __FUNCTION__ , chdev->name , status , cycle );
     700#endif
     701        server->nic_cmd.error  = 0;
     702    }
     703
     704    // unblock server thread
     705    server->nic_cmd.status = status;
     706    thread_unblock( XPTR( local_cxy , server ) , THREAD_BLOCKED_ISR );
    651707
    652708} // end soclib_nic_isr()
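The new container allocation loop above packs several 2048 bytes containers into each physical page instead of issuing one KCM request per container. The packing arithmetic, restated with comments and assuming the 4 Kbytes TSAR page (CONFIG_PPM_PAGE_ORDER = 12):

    // with 4 Kbytes pages : 1 << (12 - 11) = 2 containers per page
    uint32_t cont_per_page = 1 << (CONFIG_PPM_PAGE_ORDER - 11);

    // a new page is allocated every cont_per_page containers...
    if( (cont_gid & (cont_per_page - 1)) == 0 )
        container = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_KERNEL );
    else
        // ...otherwise the next container starts 2048 bytes (512 words) further
        container = container + 512;              /* container is an uint32_t pointer */

    // physical address = PPN of the page | offset of the container inside the page
    ppn  = ppm_base2ppn( XPTR( local_cxy , container ) );
    padr = ((uint64_t)ppn << CONFIG_PPM_PAGE_ORDER) |
           ((intptr_t)container & CONFIG_PPM_PAGE_MASK);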
  • trunk/hal/tsar_mips32/drivers/soclib_nic.h

    r658 r686  
    2626
    2727#include <chdev.h>
     28#include <kernel_config.h>
    2829#include <hal_kernel_types.h>
    2930
     
    4344 * in two memory mapped software FIFOs, called NIC_TX_QUEUE and NIC_RX_QUEUE, implemented
    4445 * as chained buffers (chbuf). Each slot in these FIFOs is a container, containing one
    45  * single packet. The number of containers, defining the queue depth, is a software defined
    46  * parameter. The data transfer unit between is a container (one single packet).
    47  *
    48  * - The "container" structure contains a 2040 bytes data buffer, the packet length, and
    49  *   the container state : full (owned by the reader) / empty (owned by the writer).
     46 * single packet. The number of containers, defining the queue depth, is defined by the
      47 * CONFIG_SOCK_QUEUES_DEPTH parameter. The data transfer unit is one container (one single packet).
     48 *
      49 * - One container contains a 2040 bytes data buffer, the packet length (4 bytes), and the
     50 *   container state (4 bytes) : full (owned by the reader) / empty (owned by the writer).
    5051 *   For each container, the state variable is used as a SET/RESET flip-flop to synchronize
    5152 *   the software server thread, and the hardware NIC DMA engines.
     
    126127
    127128/********************************************************************************************
    128  * This structure defines the chbuf descriptor, used to implement both the RX and TX packets
     129 * This structure defines the soclib_nic chbuf descriptor, used to implement the RX and TX
    129130 * queues. Each container contains one single packet, and has only two states (full/empty).
    130131 * All containers are allocated in the same cluster as the associated NIC chdev descriptor.
     
    136137 *******************************************************************************************/
    137138
    138 #define SOCLIB_NIC_CHBUF_DEPTH   8
    139 
    140139typedef struct nic_chbuf_s
    141140{
    142     uint32_t   wid;                              /*! current container write index         */
    143     uint32_t   rid;                              /*! current container read index          */
    144     uint64_t   cont_pad[SOCLIB_NIC_CHBUF_DEPTH]; /*! containers physical base addresses    */
    145     uint32_t * cont_ptr[SOCLIB_NIC_CHBUF_DEPTH]; /*! containers virtual base addresses     */
     141    uint32_t   wid;                               /*! current container write index        */
     142    uint32_t   rid;                               /*! current container read index         */
     143    uint64_t   cont_pad[CONFIG_SOCK_QUEUES_DEPTH]; /*! containers physical base addresses   */
     144    uint32_t * cont_ptr[CONFIG_SOCK_QUEUES_DEPTH]; /*! containers virtual base addresses    */
    146145}
    147146nic_chbuf_t;
    148147
    149148/********************************************************************************************
    150  * This structure defines the container descriptor format.
     149 * This structure defines the soclib_nic container descriptor format.
     150 * One container occupies exactly 2048 bytes.
    151151 *******************************************************************************************/
    152152
     
    212212
    213213/********************************************************************************************
    214  * This ISR is executed when a new RX container has been moved to an empty TX queue,
    215  * or when a TX container has been removed from a full TX queue. In both cases, it
    216  * reactivate the corresponding server thread from the BLOCKED_ISR condition.
    217  * It is also executed in case of error reported by the DMA engines accessing the TX or RX
    218  * queues. It simply print an error message on the kernel terminal.
    219  * TODO improve this error handling...
     214 * This ISR is executed in four cases :
     215 * - when a RX container has been moved to an empty RX queue by the RX DMA engine,
     216 * - when a TX container has been removed from a full TX queue by the TX DMA engine,
     217 * - when an error is reported by the RX DMA engine accessing the RX queue,
     218 * - when an error is reported by the TX DMA engine accessing the TX queue,
     219 * In all cases it simply reactivates the corresponding TX or RX server thread,
      220 * and signals the event type by writing the relevant value in the command "error" field.
    220221 ********************************************************************************************
    221222 * @ chdev     : local pointer on NIC chdev descriptor.
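The container layout described above (2040 bytes of data, 4 bytes of packet length, 4 bytes of state) and the word indices 510 and 511 used in soclib_nic_chbuf_display() suggest the following 2048-byte descriptor; the field names are illustrative, the actual nic_cont_t definition is not shown in this changeset:

    typedef struct nic_cont_s
    {
        uint8_t    data[2040];   /*! packet data : one single packet                  */
        uint32_t   plen;         /*! packet length in bytes    (word index 510)       */
        uint32_t   sts;          /*! full / empty state flag   (word index 511)       */
    }
    nic_cont_t;                  /* sizeof(nic_cont_t) == 2048, checked by an assert  */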
  • trunk/hal/tsar_mips32/drivers/soclib_pic.c

    r679 r686  
    22 * soclib_pic.c - soclib PIC driver implementation.
    33 *
    4  * Author  Alain Greiner (2016,2017,2018,2019)
     4 * Author  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3939//////////////////////////////////////////////////////////////////////////////////////
    4040
    41 extern  chdev_directory_t chdev_dir;    // defined in chdev.h / allocated in kerneL-init.c
      41extern  chdev_directory_t chdev_dir;  // defined in chdev.h / allocated in kernel_init.c
    4242
    4343extern  iopic_input_t  iopic_input;  // defined in dev_pic.h / allocated in kernel_init.c
     
    5858    soclib_pic_cluster_t * ext_ptr = LOCAL_CLUSTER->pic_extend;
    5959
    60     assert( __FUNCTION__, (ext_ptr->first_free_wti < ext_ptr->wti_nr) ,
    61             "no free WTI found : too much external IRQs\n");
     60assert( __FUNCTION__, (ext_ptr->first_free_wti < ext_ptr->wti_nr) ,
     61"no free WTI found : too much external IRQs");
    6262
    6363    // update WTI allocator
     
    147147        if( index < LOCAL_CLUSTER->cores_nr )   // it is an IPI
    148148        {
    149             assert( __FUNCTION__, (index == core->lid) , "illegal IPI index" );
     149
     150assert( __FUNCTION__, (index == core->lid),
     151"illegal IPI index" );
    150152
    151153#if DEBUG_HAL_IRQS
     
    170172                    {
    171173                printk("\n[WARNING] in %s : no handler for WTI %d on core %d in cluster %x\n",
    172                        __FUNCTION__ , index , core->lid , local_cxy );
    173 
    174                     core->spurious_irqs ++;
     174                 __FUNCTION__ , index , core->lid , local_cxy );
    175175
    176176                // disable WTI in local XCU controller
     
    204204                {
    205205            printk("\n[WARNING] in %s : no handler for HWI %d on core %d in cluster %x\n",
    206                    __FUNCTION__ , index , core->lid , local_cxy );
    207 
    208                 core->spurious_irqs ++;
     206            __FUNCTION__ , index , core->lid , local_cxy );
    209207
    210208            // disable HWI in local XCU controller
     
    230228        index = pti_status - 1;
    231229
    232         assert( __FUNCTION__, (index == core->lid) , "unconsistent PTI index\n");
     230assert( __FUNCTION__, (index == core->lid),
     231"unconsistent PTI index\n");
    233232
    234233#if DEBUG_HAL_IRQS
     
    278277    soclib_pic_cluster_t * cluster_ext_ptr;   
    279278    soclib_pic_core_t    * core_ext_ptr;
    280     kmem_req_t             req;
    281279    uint32_t               lid;
    282280    uint32_t               idx;
     
    288286    {
    289287        // allocate memory for core extension
    290         req.type     = KMEM_KCM;
    291         req.order    = bits_log2( sizeof(soclib_pic_core_t) );
    292         req.flags    = AF_KERNEL;
    293         core_ext_ptr = kmem_alloc( &req );
     288        core_ext_ptr = kmem_alloc( bits_log2( sizeof(soclib_pic_core_t)) , AF_KERNEL );
    294289
    295290        if( core_ext_ptr == NULL )
     
    308303
    309304    // allocate memory for cluster extension
    310     req.type        = KMEM_KCM;
    311     req.order       = bits_log2( sizeof(soclib_pic_cluster_t) );
    312     req.flags       = AF_KERNEL;
    313     cluster_ext_ptr = kmem_alloc( &req );
     305    cluster_ext_ptr = kmem_alloc( bits_log2( sizeof(soclib_pic_cluster_t) ), AF_KERNEL );
    314306
    315307    if( cluster_ext_ptr == NULL )
     
    319311    }
    320312   
    321 assert( __FUNCTION__, (cluster_ext_ptr != NULL) , "cannot allocate memory for cluster extension");
    322 
    323313    // get XCU characteristics from the XCU config register
    324314    uint32_t  config = xcu_base[XCU_CONFIG<<5];
     
    380370    bool_t   is_rx   = src_chdev->is_rx;
    381371
    382     if( (func == DEV_FUNC_IOC && impl == IMPL_IOC_BDV) || (func == DEV_FUNC_NIC) ||
    383         (func == DEV_FUNC_TXT && impl == IMPL_TXT_TTY) || (func == DEV_FUNC_IOB) ) // external IRQ => WTI
     372    if( ((func == DEV_FUNC_IOC) && (impl == IMPL_IOC_BDV)) ||
     373        (func == DEV_FUNC_NIC)                             ||
     374        ((func == DEV_FUNC_TXT) && (impl == IMPL_TXT_TTY)) ||
     375        (func == DEV_FUNC_IOB) ) // external IRQ => WTI
    384376    {
    385377        // get external IRQ index
    386378        uint32_t  hwi_id = 0;   
    387379        if     (  func == DEV_FUNC_IOC            ) hwi_id = iopic_input.ioc[channel];
    388         else if(  func == DEV_FUNC_TXT &&  is_rx ) hwi_id = iopic_input.txt_rx[channel];
    389         else if(  func == DEV_FUNC_TXT && !is_rx ) hwi_id = iopic_input.txt_tx[channel];
     380        else if( (func == DEV_FUNC_TXT) &&  is_rx ) hwi_id = iopic_input.txt_rx[channel];
     381        else if( (func == DEV_FUNC_TXT) && !is_rx ) hwi_id = iopic_input.txt_tx[channel];
    390382        else if( (func == DEV_FUNC_NIC) &&  is_rx ) hwi_id = iopic_input.nic_rx[channel];
    391383        else if( (func == DEV_FUNC_NIC) && !is_rx ) hwi_id = iopic_input.nic_tx[channel];
    392384        else if(  func == DEV_FUNC_IOB            ) hwi_id = iopic_input.iob;
    393         else      assert( __FUNCTION__, false , "illegal device functionnal type\n");
     385        else
     386        {
     387            printk("\n[WARNING] from %s : illegal device / func %s / is_rx %d\n",
     388            __FUNCTION__, chdev_func_str(func), is_rx );
     389        }
    394390
    395391        // get a WTI mailbox from local XCU descriptor 
     
    420416#if DEBUG_HAL_IRQS
    421417if( DEBUG_HAL_IRQS < cycle )
    422 printk("\n[DBG] %s : %s / channel = %d / rx = %d / hwi_id = %d / wti_id = %d / cluster = %x\n",
     418printk("\n[DBG] %s : %s / channel %d / rx %d / hwi_id %d / wti_id %d / cluster %x\n",
    423419__FUNCTION__ , chdev_func_str( func ) , channel , is_rx , hwi_id , wti_id , local_cxy );
    424420#endif
    425421
    426422    }
    427     else if( (func == DEV_FUNC_DMA) || (func == DEV_FUNC_MMC) ||
     423    else if( (func == DEV_FUNC_DMA) ||
     424             (func == DEV_FUNC_MMC) ||
    428425             (func == DEV_FUNC_TXT && impl == IMPL_TXT_MTY) ||
    429426             (func == DEV_FUNC_IOC && impl == IMPL_IOC_SPI) )   // internal IRQ => HWI
     
    431428        // get internal IRQ index
    432429        uint32_t hwi_id;
    433         if( func == DEV_FUNC_DMA ) hwi_id = lapic_input.dma[channel];
     430        if( func == DEV_FUNC_DMA )      hwi_id = lapic_input.dma[channel];
    434431        else if (func == DEV_FUNC_TXT ) hwi_id = lapic_input.mtty;
    435432        else if (func == DEV_FUNC_IOC ) hwi_id = lapic_input.sdcard;
    436         else                       hwi_id = lapic_input.mmc;
     433        else                            hwi_id = lapic_input.mmc;
    437434
    438435        // register IRQ type and index in chdev
     
    453450    else
    454451    {
    455         assert( __FUNCTION__, false , "illegal device functionnal type\n" );
     452        printk("\n[WARNING] from %s : illegal device / func %s / is_rx %d / impl %d\n",
     453        __FUNCTION__, chdev_func_str(func), is_rx, impl );
    456454    }
    457455}  // end soclib_pic_bind_irq();
     
    477475        // in TSAR : XCU output [4*lid] is connected to core [lid]
    478476        hal_remote_s32( XPTR( src_chdev_cxy ,
    479                        &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
     477        &seg_xcu_ptr[ (XCU_MSK_HWI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
    480478    }
    481479    else if( irq_type == SOCLIB_TYPE_WTI )
     
    484482        // in TSAR : XCU output [4*lid] is connected to core [lid]
    485483        hal_remote_s32( XPTR( src_chdev_cxy ,
    486                        &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
     484        &seg_xcu_ptr[ (XCU_MSK_WTI_ENABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
    487485    }
    488486    else
    489487    {
    490         assert( __FUNCTION__, false , "illegal IRQ type\n" );
     488        printk("\n[WARNING] from %s : illegal IRQ type %d\n",
     489        __FUNCTION__, irq_type );
    491490    }
    492491} // end soclib_pic_enable_irq()
     
    512511        // in TSAR : XCU output [4*lid] is connected to core [lid]
    513512        hal_remote_s32( XPTR( src_chdev_cxy ,
    514                        &seg_xcu_ptr[(XCU_MSK_HWI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
     513        &seg_xcu_ptr[(XCU_MSK_HWI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
    515514    }
    516515    else if( irq_type == SOCLIB_TYPE_WTI )
     
    519518        // in TSAR : XCU output [4*lid] is connected to core [lid]
    520519        hal_remote_s32( XPTR( src_chdev_cxy ,
    521                        &seg_xcu_ptr[(XCU_MSK_WTI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
     520        &seg_xcu_ptr[(XCU_MSK_WTI_DISABLE << 5) | (lid<<2) ] ) , (1 << irq_id) );
    522521    }
    523522    else
    524523    {
    525         assert( __FUNCTION__, false , "illegal IRQ type\n" );
     524        printk("\n[WARNING] from %s : illegal IRQ type %d\n",
     525        __FUNCTION__, irq_type );
    526526    }
    527527} // end soclib_pic_enable_irq()
     
    570570}
    571571
    572 /////////////////////////
     572///////////////////////////////
    573573void soclib_pic_ack_ipi( void )
    574574{
     
    582582    uint32_t   ack  = base[ (XCU_WTI_REG << 5) | lid ];
    583583
    584     // we must make a fake use for ack value to avoid a warning
     584    // we make a fake use for ack value to avoid a warning
    585585    if( (ack + 1) == 0 ) asm volatile( "nop" );
    586586}
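The restructured test in soclib_pic_bind_irq() above classifies each chdev as using either an external IRQ (a WTI routed through the IOPIC) or an internal IRQ (a HWI wired to the local XCU). The same decision, written as a small helper for readability; the function name is illustrative only:

    // true for devices whose IRQ is an external WTI, false for an internal HWI
    static inline bool_t soclib_pic_irq_is_external( uint32_t func, uint32_t impl )
    {
        return ( ((func == DEV_FUNC_IOC) && (impl == IMPL_IOC_BDV)) ||
                  (func == DEV_FUNC_NIC)                            ||
                 ((func == DEV_FUNC_TXT) && (impl == IMPL_TXT_TTY)) ||
                  (func == DEV_FUNC_IOB) );
    }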
  • trunk/hal/x86_64/core/hal_context.c

    r457 r686  
    109109        /* Switch the VM space */
    110110        if (newproc != oldproc) {
    111                 lcr3((uint64_t)newproc->vmm.gpt.ppn << CONFIG_PPM_PAGE_SHIFT);
     111                lcr3((uint64_t)newproc->vmm.gpt.ppn << CONFIG_PPM_PAGE_ORDER);
    112112        }
    113113
  • trunk/hal/x86_64/core/hal_exception.c

    r457 r686  
    9595
    9696                        error = vmm_handle_page_fault(process,
    97                             bad_vaddr >> CONFIG_PPM_PAGE_SHIFT);
     97                            bad_vaddr >> CONFIG_PPM_PAGE_ORDER);
    9898
    9999                        x86_printf("VA=%Z ERROR=%Z\n", bad_vaddr, (uint64_t)error);
  • trunk/hal/x86_64/core/hal_gpt.c

    r635 r686  
    301301        L4dst = (pt_entry_t *)ppm_page2base(page_xp);
    302302        memcpy(&L4dst[256], &L4src[256], 256 * sizeof(pt_entry_t));
    303         L4dst[L4_SLOT_PTE] = (ppm_page2ppn(page_xp) << CONFIG_PPM_PAGE_SHIFT) |
     303        L4dst[L4_SLOT_PTE] = (ppm_page2ppn(page_xp) << CONFIG_PPM_PAGE_ORDER) |
    304304            PG_V | PG_KW | PG_NX;
    305305
     
    324324error_t hal_gpt_set_pte(gpt_t *gpt, vpn_t vpn, uint32_t attr, ppn_t ppn)
    325325{
    326         vaddr_t va = vpn << CONFIG_PPM_PAGE_SHIFT;
     326        vaddr_t va = vpn << CONFIG_PPM_PAGE_ORDER;
    327327        paddr_t pa;
    328328        kmem_req_t req;
     
    384384        }
    385385
    386         pa = ppn << CONFIG_PPM_PAGE_SHIFT;
     386        pa = ppn << CONFIG_PPM_PAGE_ORDER;
    387387        L1_BASE[pl1_i(va)] = pa | hal_gpt_attr_to_pte(attr);
    388388
     
    392392void hal_gpt_get_pte(gpt_t *gpt, vpn_t vpn, uint32_t *attr, ppn_t *ppn)
    393393{
    394         vaddr_t va = vpn << CONFIG_PPM_PAGE_SHIFT;
     394        vaddr_t va = vpn << CONFIG_PPM_PAGE_ORDER;
    395395
    396396        *attr = 0;
     
    408408                /* large page */
    409409                *attr = hal_gpt_pte_to_attr(&L2_BASE[pl2_i(va)]);
    410                 *ppn = (L2_BASE[pl2_i(va)] & PG_2MFRAME) >> CONFIG_PPM_PAGE_SHIFT;
     410                *ppn = (L2_BASE[pl2_i(va)] & PG_2MFRAME) >> CONFIG_PPM_PAGE_ORDER;
    411411        } else {
    412412                /* small page */
    413413                *attr = hal_gpt_pte_to_attr(&L1_BASE[pl1_i(va)]);
    414                 *ppn = (L1_BASE[pl1_i(va)] & PG_FRAME) >> CONFIG_PPM_PAGE_SHIFT;
     414                *ppn = (L1_BASE[pl1_i(va)] & PG_FRAME) >> CONFIG_PPM_PAGE_ORDER;
    415415        }
    416416
  • trunk/hal/x86_64/core/hal_ppm.c

    r457 r686  
    6868
    6969        // compute number of pages required to store page descriptor array
    70         uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_SHIFT;
     70        uint32_t pages_tbl_nr = bytes >> CONFIG_PPM_PAGE_ORDER;
    7171
    7272        // compute total number of reserved pages (kernel code & pages_tbl[])