Changeset 623 for trunk


Timestamp:
Mar 6, 2019, 4:37:15 PM (5 years ago)
Author:
alain
Message:

Introduce three new types of vsegs (KCODE,KDATA,KDEV)
to map the kernel vsegs in the process VSL and GPT.
This is now used by both the TSAR and the I86 architectures.
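For orientation, here is a minimal sketch of what the three new vseg types could look like. Only the names KCODE, KDATA and KDEV come from this message; the enum values and the surrounding definition below are assumptions, not the committed code. The hal_vmm.c hunk near the end of this changeset shows how such kernel vsegs get registered in a VSL through vmm_create_vseg().

    /* hypothetical sketch: three new kernel vseg types (all values assumed) */
    typedef enum
    {
        VSEG_TYPE_CODE   = 0,     /* user code                    */
        VSEG_TYPE_DATA   = 1,     /* user data                    */
        VSEG_TYPE_KCODE  = 5,     /* kernel code           (new)  */
        VSEG_TYPE_KDATA  = 6,     /* kernel data           (new)  */
        VSEG_TYPE_KDEV   = 7,     /* kernel device window  (new)  */
    }   vseg_type_t;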

Location:
trunk
Files:
60 edited

  • trunk/Makefile

    r610 r623  
    44
    55-include params-soft.mk
     6
    67ifeq ($(ARCH_NAME),)
    7 $(error Please define in ARCH_NAME parameter in params-soft.mk!)
     8$(error Please define ARCH_NAME parameter in params-soft.mk!)
    89endif
    910
     
    5657MTOOLS_SKIP_CHECK := 1
    5758
    58 # Rule to generate boot.elf, kernel.elf, all user.elf files, and update virtual disk.
     59##########################################################################################
     60# Rule to generate boot.elf, kernel.elf, all user.elf files, and update the virtual disk
      61# when the corresponding source files have been modified or destroyed.
     62# The /home directory on the virtual disk is not modified
    5963compile: dirs                              \
    60          build_disk                        \
    6164         hard_config.h                     \
    6265         build_libs                        \
     
    6871         user/idbg/build/idbg.elf          \
    6972         user/sort/build/sort.elf          \
    70          user/fft/build/fft.elf           \
     73         user/fft/build/fft.elf            \
    7174         list
    7275
     
    8588        mcopy -o -i $(DISK_IMAGE) ::/home .
    8689
     90##############################################################
    8791# Rules to delete all binary files from Unix File System
    8892# without modifying the virtual disk.
     
    119123        mmd                     -o -i $(DISK_IMAGE) ::/bin/user    || true
    120124        mmd                     -o -i $(DISK_IMAGE) ::/home        || true
    121         mcopy           -o -i $(DISK_IMAGE) Makefile ::/home
    122125        mdir             -/ -b -i $(DISK_IMAGE) ::/
    123126
     
    125128# Rules to generate hardware description files (hard_config.h,
    126129# arch_info.bin and arch_info.xml), and update the virtual disk.
    127 hard_config.h: build_disk $(ARCH)/arch_info.py
     130hard_config.h: $(ARCH)/arch_info.py
    128131        tools/arch_info/genarch.py      --arch=$(ARCH)                  \
    129132                                                                --x_size=$(X_SIZE)              \
  • trunk/boot/tsar_mips32/boot.c

    r578 r623  
    33 *
    44 * Authors :   Vu Son  (2016)
    5  *             Alain Greiner (2016, 2017,2018)
     5 *             Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    2626 * This file contains the ALMOS-MKH. boot-loader for the TSAR architecture. *
    2727 *                                                                          *
    28  * It supports clusterised shared memory multi-processor architectures,     *
     28 * It supports a clusterised, shared memory, multi-processor architecture,  *
    2929 * where each processor core is identified by a composite index [cxy,lid]   *
    3030 * with one physical memory bank per cluster.                               *
    3131 *                                                                          *
    3232 * The 'boot.elf' file (containing the boot-loader binary code) is stored   *
    33  * on disk and is loaded into memory by core[0,0] (cxy = 0 / lid = 0),      *
    34  * and is copied in each other cluter by the local CP0 (lid = 0].           *
     33 * on disk (not in the FAT file system), and must be loaded into memory by  *
     34 * the preloader running on the core[0][0] (cxy = 0 / lid = 0).             *
    3535 *                                                                          *
    36  * 1) The boot-loader first phase is executed by core[0,0], while           *
    37  *    all other cores are waiting in the preloader.                         *
    38  *    It does the following tasks:                                          *
    39  *      - load into the memory bank of cluster 0 the 'arch_info.bin'        *
    40  *        file (containing the hardware architecture description) and the   *
    41  *        'kernel.elf' file, at temporary locations,                        *   
    42  *      - initializes the 'boot_info_t' structure in cluster(0,0)           *
    43  *        (there is 1 'boot_info_t' per cluster), which contains both       *
    44  *        global and cluster specific information that will be used for     *
    45  *        kernel initialisation.                                            *
    46  *      - activate CP0s in all other clusters, using IPIs.                  *
    47  *      - wait completion reports from CP0s on a global barrier.            *
     36 * The main task of the boot-loader is to load in the first physical page   *
     37 * of each cluster a copy of the kernel code (segments "kcode" and "kdata") *
     38 * and to build - in each cluster - a cluster specific description of the   *
      39 * hardware architecture, stored in the "kdata" segment as the boot_info_t  *
     40 * structure. The "kernel.elf" and "arch_info.bin" files are supposed to be *
     41 * stored on disk in a FAT32 file system.                                   *
    4842 *                                                                          *
    49  * 2) The boot-loader second phase is then executed in parallel by all      *
    50  *    CP0s (other than core[0,0]). Each CP0 performs the following tasks:   *
    51  *      - copies into the memory bank of the local cluster the 'boot.elf',  *
    52  *        the 'arch_info.bin' (at the same addresses as the 'boot.elf' and  *
    53  *        the 'arch_info.bin' in the memory bank of the cluster(0,0), and   *
    54  *        the kernel image (at address 0x0),                                *
    55  *      - initializes the 'boot_info_t' structure of the local cluster,     *
    56  *      - activate all other cores in the same cluster (CPi).               *
    57  *      - wait local CPi completion reports on a local barrier.             *
    58  *      - report completion  on the global barrier.                         *
      43 * All cores contribute to the boot procedure, but not all cores are        *
     44 * simultaneously active:                                                   *
     45 * - in a first phase, only core[0][0] is running (core 0 in cluster 0).    *
     46 * - in a second phase, only core[cxy][0] is running in each cluster.       *
      47 * - in a last phase, all core[cxy][lid] are running.                       *
    5948 *                                                                          *
    60  * 3) The boot-loader third phase is executed in parallel by all cores.     *
    61  *    In each cluster (i) the CP0                                           *
    62  *      - activates the other cores of cluster(i),                          *
    63  *      - blocks on the local barrier waiting for all local CPi to report   *
    64  *        completion on the local barrier,                                  *
    65  *      - moves the local kernel image from the temporary location to the   *
    66  *        address 0x0, (erasing the preloader code).                        *
      49 * Finally, all cores jump to the kernel_init() function that performs the  *
     50 * actual kernel initialisation.                                            *
    6751 *                                                                          *
    68  * 4) All cores jump to kern_init() (maybe not at the same time).           *
      52 * Implementation note:                                                     *
      53 * To allow each core to use the local copy of both the boot code and the   *
     54 * kernel code, the boot-loader builds a minimal and temporary BPT (Boot    *
     55 * Page Table) containing only two big pages: page[0] maps the kernel code, *
      56 * and page[1] maps the boot code.                                           *
    6957 ****************************************************************************/
    7058
     
    9684 ****************************************************************************/
    9785
     86// the Boot Page Table contains two PTE1, and should be aligned on 8 Kbytes
     87
     88uint32_t                        boot_pt[2] __attribute__((aligned(2048)));
     89
    9890// synchronization variables.
    9991
    100 volatile boot_remote_spinlock_t tty0_lock;       // protect TTY0 access
    101 volatile boot_remote_barrier_t  global_barrier;  // synchronize CP0 cores
    102 volatile boot_remote_barrier_t  local_barrier;   // synchronize cores in one cluster
    103 uint32_t                        active_cp0s_nr;  // number of expected CP0s
     92volatile boot_remote_spinlock_t tty0_lock;        // protect TTY0 access
     93volatile boot_remote_barrier_t  global_barrier;   // synchronize CP0 cores
     94volatile boot_remote_barrier_t  local_barrier;    // synchronize cores in one cluster
     95uint32_t                        active_cores_nr;  // number of expected CP0s
    10496 
    10597// kernel segments layout variables
     
    114106uint32_t                        kernel_entry;    // kernel entry point
    115107
    116 // Functions called by boot_entry.S
     108// Functions
    117109
    118110extern void boot_entry( void );    // boot_loader entry point
     
    738730
    739731/*********************************************************************************
    740  * This function is called by all CP0 to activate the other CPi cores.
     732 * This function is called by all CP0s to activate the other CPi cores.
    741733 * @ boot_info  : pointer to local 'boot_info_t' structure.
    742734 *********************************************************************************/
     
    761753} // boot_wake_local_cores()
    762754
     755/*********************************************************************************
     756 * This function is called by all core[cxy][0] to initialize the Boot Page Table:
     757 * map two local big pages for the boot code and kernel code.
     758 * @ cxy    : local cluster identifier.
     759 *********************************************************************************/
     760void boot_page_table_init( cxy_t  cxy )
     761{
     762    // set PTE1 in slot[0] for kernel code
      763    uint32_t kernel_attr  = 0x8A800000;                   // flags : V,C,X,G
     764    uint32_t kernel_ppn1  = (cxy << 20) >> 9;             // big physical page index == 0
     765    boot_pt[0]            = kernel_attr | kernel_ppn1;
     766
     767    // set PTE1 in slot[1] for boot code (no global flag)
     768    uint32_t boot_attr    = 0x8A000000;                   // flags : V,C,X
     769    uint32_t boot_ppn1    = ((cxy << 20) + 512) >> 9;     // big physical page index == 1
     770    boot_pt[1]            = boot_attr | boot_ppn1;
     771}
     772
     773/*********************************************************************************
     774 * This function is called by all cores to activate the instruction MMU,
     775 * and use the local copy of boot code.
     776 *********************************************************************************/
     777void boot_activate_ins_mmu( cxy_t cxy )
     778{
     779    // set mmu_ptpr register
     780    uint32_t ptpr = ((uint32_t)boot_pt >> 13) | (cxy << 19);
     781    asm volatile ( "mtc2   %0,   $0         \n" : : "r" (ptpr) );
     782
     783    // set ITLB bit in mmu_mode
     784    asm volatile ( "mfc2   $26,  $1         \n"
     785                   "ori    $26,  $26,  0x8  \n"
     786                   "mtc2   $26,  $1         \n" );
     787}
    763788
    764789/*********************************************************************************
     
    776801    if (lid == 0)
    777802    {
    778         /****************************************************
    779          * PHASE A : only CP0 in boot cluster executes it
    780          ***************************************************/
    781         if (cxy == BOOT_CORE_CXY)
      803        /***********************************************************
      804         * PHASE Sequential : only core[0][0] executes it
     805         **********************************************************/
     806        if (cxy == 0)
    782807        {
    783808            boot_printf("\n[BOOT] core[%x,%d] enters at cycle %d\n",
     
    833858            boot_check_core(boot_info, lid);
    834859
    835             // Activate other CP0s / get number of active CP0s
    836             active_cp0s_nr = boot_wake_all_cp0s() + 1;
     860// TO BE DONE
     861// core[0][0] identity maps two big pages for the boot and kernel code,
     862// boot_page_table_init( 0 );
     863
     864// TO BE DONE
     865// core[0][0] activates the instruction MMU to use the local copy of boot code
     866// boot_activate_ins_mmu( 0 );
     867
     868            // Activate other core[cxy][0] / get number of activated cores
     869            active_cores_nr = boot_wake_all_cp0s() + 1;
    837870
    838871            // Wait until all clusters (i.e all CP0s) ready to enter kernel.
    839872            boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) ,
    840                                  active_cp0s_nr );
     873                                 active_cores_nr );
    841874
    842875            // activate other local cores
    843876            boot_wake_local_cores( boot_info );
    844 
    845 // display address extensions
    846 // uint32_t cp2_data_ext;
    847 // uint32_t cp2_ins_ext;
    848 // asm volatile( "mfc2   %0,  $24" : "=&r" (cp2_data_ext) );
    849 // asm volatile( "mfc2   %0,  $25" : "=&r" (cp2_ins_ext) );
    850 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",
    851 // cxy , lid , cp2_data_ext , cp2_ins_ext );
    852877
    853878            // Wait until all local cores in cluster ready
     
    855880                                 boot_info->cores_nr );
    856881        }
    857         /******************************************************************
    858          * PHASE B : all CP0s other than CP0 in boot cluster execute it
    859          *****************************************************************/
     882        /**************************************************************************
     883         * PHASE partially parallel : all core[cxy][0] with (cxy != 0) execute it
     884         **************************************************************************/
    860885        else
    861886        {
    862             // at this point, all INSTRUCTION address extension registers
    863             // point on cluster(0,0), but the DATA extension registers point
    864             // already on the local cluster to use the local stack.
    865             // To access the bootloader global variables we must first copy
    866             // the boot code (data and instructions) in the local cluster.
      887            // at this point, the DATA extension registers already point
      888            // to the local cluster cxy to use the local stack,
     889            // but all cores must access the code stored in cluster 0
     890
     891            // Each CP0 copies the boot code (data and instructions)
     892            // from the cluster 0 to the local cluster.
    867893            boot_remote_memcpy( XPTR( cxy           , BOOT_BASE ),
    868894                                XPTR( BOOT_CORE_CXY , BOOT_BASE ),
    869895                                BOOT_MAX_SIZE );
    870896
    871             // from now, it is safe to refer to the boot code global variables
     897            // from now, it is safe to refer to the boot global variables
    872898            boot_printf("\n[BOOT] core[%x,%d] replicated boot code at cycle %d\n",
    873899            cxy , lid , boot_get_proctime() );
    874900
    875                         // switch to the INSTRUCTION local memory space, to avoid contention.
    876             // asm volatile("mtc2  %0, $25" :: "r"(cxy));
    877 
    878             // Copy the arch_info.bin file into the local memory.
     901// TO BE DONE
     902// Each core identity maps two big pages for the boot and kernel code,
     903// boot_page_table_init( cxy );
     904
     905// Each core activates the instruction MMU to use the local copy of boot code
     906// boot_activate_ins_mmu( cxy );
     907
     908            // Each CP0 copies the arch_info.bin into the local memory.
    879909            boot_remote_memcpy(XPTR(cxy,           ARCHINFO_BASE),
    880910                               XPTR(BOOT_CORE_CXY, ARCHINFO_BASE),
     
    884914            cxy , lid , boot_get_proctime() );
    885915
    886             // Copy the kcode segment into local memory
     916            // Each CP0 copies the kcode segment into local memory
    887917            boot_remote_memcpy( XPTR( cxy           , seg_kcode_base ),
    888918                                XPTR( BOOT_CORE_CXY , seg_kcode_base ),
    889919                                seg_kcode_size );
    890920
    891             // Copy the kdata segment into local memory
     921            // Each CP0 copies the kdata segment into local memory
    892922            boot_remote_memcpy( XPTR( cxy           , seg_kdata_base ),
    893923                                XPTR( BOOT_CORE_CXY , seg_kdata_base ),
    894924                                seg_kdata_size );
    895925
    896             // Copy the kentry segment into local memory
      926            // [TO BE REMOVED]
     927            // Each CP0 copies the kentry segment into local memory
    897928            boot_remote_memcpy( XPTR( cxy           , seg_kentry_base ),
    898929                                XPTR( BOOT_CORE_CXY , seg_kentry_base ),
     
    902933            cxy , lid , boot_get_proctime() );
    903934
    904             // Get local boot_info_t structure base address.
      935            // Each CP0 gets the local boot_info_t structure base address.
    905936            boot_info = (boot_info_t*)seg_kdata_base;
    906937
    907             // Initialize local boot_info_t structure.
     938            // Each CP0 initializes local boot_info_t structure.
    908939            boot_info_init( boot_info , cxy );
    909940
     
    911942            cxy , lid , boot_get_proctime() );
    912943
    913             // Check core information.
     944            // Each CP0 checks core information.
    914945            boot_check_core( boot_info , lid );
    915946
    916             // get number of active clusters from BOOT_CORE cluster
    917             uint32_t count = boot_remote_lw( XPTR( BOOT_CORE_CXY , &active_cp0s_nr ) );
    918 
    919             // Wait until all clusters (i.e all CP0s) ready to enter kernel
      947            // Each CP0 gets the number of active clusters from the BOOT_CORE cluster
     948            uint32_t count = boot_remote_lw( XPTR( 0 , &active_cores_nr ) );
     949
     950            // Wait until all clusters (i.e all CP0s) ready
    920951            boot_remote_barrier( XPTR( BOOT_CORE_CXY , &global_barrier ) , count );
    921952
    922953            // activate other local cores
    923954            boot_wake_local_cores( boot_info );
    924 
    925 // display address extensions
    926 // uint32_t cp2_data_ext;
    927 // uint32_t cp2_ins_ext;
    928 // asm volatile( "mfc2   %0,  $24" : "=&r" (cp2_data_ext) );
    929 // asm volatile( "mfc2   %0,  $25" : "=&r" (cp2_ins_ext) );
    930 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",
    931 // cxy , lid , cp2_data_ext , cp2_ins_ext );
    932955
    933956            // Wait until all local cores in cluster ready
     
    938961    else
    939962    {
    940         /***************************************************************
    941          * PHASE C: all non CP0 cores in all clusters execute it
    942          **************************************************************/
    943 
    944         // Switch to the INSTRUCTIONS local memory space
    945         // to avoid contention at the boot cluster.
    946         asm volatile("mtc2  %0, $25" :: "r"(cxy));
     963        /***********************************************************************
      964         * PHASE fully parallel : all cores[cxy][lid] with (lid != 0) execute it
     965         **********************************************************************/
     966
     967// TO BE DONE
      968// each core activates the instruction MMU to use the local copy of the boot code
     969// boot_activate_ins_mmu( cxy );
    947970
    948971        // Get local boot_info_t structure base address.
     
    952975        boot_check_core(boot_info, lid);
    953976
    954 // display address extensions
    955 // uint32_t cp2_data_ext;
    956 // uint32_t cp2_ins_ext;
    957 // asm volatile( "mfc2   %0,  $24" : "=&r" (cp2_data_ext) );
    958 // asm volatile( "mfc2   %0,  $25" : "=&r" (cp2_ins_ext) );
    959 // boot_printf("\n[BOOT] core[%x,%d] CP2_DATA_EXT = %x / CP2_INS_EXT = %x\n",
    960 // cxy , lid , cp2_data_ext , cp2_ins_ext );
    961 
    962977        // Wait until all local cores in cluster ready
    963978        boot_remote_barrier( XPTR( cxy , &local_barrier ) , boot_info->cores_nr );
    964979    }
    965980
      981    // the "kernel_entry" global variable, set by boot_kernel_load(), defines
      982    // the address of the kernel_init() function.
     966983    // Each core initialises the following registers before jumping to kernel:
    967     // - sp_29    : stack pointer on idle thread,
    968     // - c0_sr    : reset BEV bit
    969     // - a0_04    : pointer on boot_info structure
    970     // - c0_ebase : kentry_base(and jump to kernel_entry.
    971 
     984    // - gr_29    : stack pointer / kernel stack allocated in idle thread descriptor,
     985    // - c0_sr    : status register / reset BEV bit
     986    // - gr_04    : kernel_init() argument / pointer on boot_info structure
     987    // - c0_ebase : kentry_base
     988
     989    // compute "sp" from base address of idle thread descriptors array and lid.
    972990    // The array of idle-thread descriptors is allocated in the kdata segment,
    973     // just after the boot_info structure
    974     uint32_t sp;
     991    // just after the boot_info structure.
    975992    uint32_t base;
    976993    uint32_t offset = sizeof( boot_info_t );
    977994    uint32_t pmask  = CONFIG_PPM_PAGE_MASK;
    978995    uint32_t psize  = CONFIG_PPM_PAGE_SIZE;
    979 
    980     // compute base address of idle thread descriptors array
    981996    if( offset & pmask ) base = seg_kdata_base + (offset & ~pmask) + psize;
    982997    else                 base = seg_kdata_base + offset;
    983 
    984     // compute stack pointer
    985     sp = base + ((lid + 1) * CONFIG_THREAD_DESC_SIZE) - 16;
     998    uint32_t sp = base + ((lid + 1) * CONFIG_THREAD_DESC_SIZE) - 16;
     999
      1000    // get "ebase" from boot_info
     1001    uint32_t ebase = boot_info->kentry_base;
     1002
     1003// TO BE DONE
      1004// The cp0_ebase will not be set by the assembly code below
      1005// once the kentry segment is removed => it will be done in kernel_init()
    9861006
    9871007    asm volatile( "mfc0  $27,  $12           \n"
     
    9971017                  : "r"(boot_info) ,
    9981018                    "r"(sp) ,
    999                     "r"(boot_info->kentry_base) ,
     1019                    "r"(ebase) ,
    10001020                    "r"(kernel_entry)
    10011021                  : "$26" , "$27" , "$29" , "$4" );
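The boot_page_table_init() function added above builds each BPT entry as (attr | ppn1), where ppn1 is a big-page index: one PTE1 covers 512 small pages of 4 Kbytes, so the boot code located 2 Mbytes above the kernel code lands in big page 1. The standalone sketch below reproduces that arithmetic; the cluster value cxy = 2 is an arbitrary example, not taken from the changeset.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t cxy = 0x2;                               /* arbitrary example cluster      */

        uint32_t kernel_attr = 0x8A800000;                /* V,C,X,G flags (from the diff)  */
        uint32_t kernel_ppn1 = (cxy << 20) >> 9;          /* big page 0 of cluster cxy      */

        uint32_t boot_attr   = 0x8A000000;                /* V,C,X flags (no G)             */
        uint32_t boot_ppn1   = ((cxy << 20) + 512) >> 9;  /* 512 pages higher => big page 1 */

        /* prints 8a801000 and 8a001001 for cxy = 2 */
        printf("boot_pt[0] = %08x\n", (unsigned)(kernel_attr | kernel_ppn1));
        printf("boot_pt[1] = %08x\n", (unsigned)(boot_attr   | boot_ppn1));
        return 0;
    }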
  • trunk/boot/tsar_mips32/boot_entry.S

    r439 r623  
    2323
    2424/**********************************************************************************************
    25  * This file contains the entry point of the ALMOS-MK boot-loader for TSAR architecture.      *
    26  * It supports a generic multi-clusters / multi-processors architecture                       *
     25 * This file contains the entry point of the ALMOS-MK boot-loader for TSAR architecture,      *
     26 * that is a generic multi-clusters / multi-processors architecture.                          *
    2727 *                                                                                            *
    2828 * - The number of clusters is defined by the (X_SIZE, Y_SIZE) parameters in the              *
     
    3131 *   hard_config.h file (up to 4 processors per cluster).                                     *
    3232 *                                                                                            *
    33  * This assembly code is executed by all cores. It has 2 versions (in order to see if the     *
    34  * contention created by the ARCHINFO core descriptor table scanning loops is acceptable):    *
    35  * with or without the assumption that the core hardware identifier gid has a fixed format:   *
      33 * This assembly code is executed by all cores, but not at the same time, because all cores   *
      34 * are not simultaneously activated. It makes the assumption that the CP0 register containing *
     35 * the core gid (global hardware identifier) has a fixed format:                              *
     36 *    gid == (((x << Y_WIDTH) + y) << P_WIDTH) + lid                                          *
    3637 *                                                                                            *
    37  * - Version with fixed format: gid == (((x << Y_WIDTH) + y) << PADDR_WIDTH) + lid            *
    38  *   It does 3 things:                                                                        *
    39  *      + It initializes the stack pointer depending on the lid extracted from the gid,       *
    40  *        using the BOOT_STACK_BASE and BOOT_STACK_SIZE parameters defined in the             *
    41  *        'boot_config.h' file,                                                               *
    42  *      + It changes the value of the address extension registers using the cxy extracted     *
    43  *        from the gid,                                                                       *
    44  *      + It jumps to the boot_loader() function defined in the 'boot.c' file and passes 2    *
    45  *        arguments which are the cxy and lid of each core to this function.                  *
    46  *                                                                                            *
    47  * - Version without fixed format                                                             *
    48  *   It has to perform an additional step in order to extract the (cxy,lid) values from the   *
    49  *   arch_info.bin structure that has been loaded in the cluster (0,0) memory by the bscpu.   *
    50  *      + Each core other than the bscpu scans the core descriptor table in the arch_info.bin *
    51  *        structure to make an associative search on the (gid), and get the (cxy,lid).        *
    52  *      + It initializes the stack pointer depending on the lid, using the BOOT_STACK_BASE    *
    53  *        and BOOT_STACK_SIZE parameters defined in the 'boot_config.h' file,                 *
    54  *      + It changes the value of the address extension registers using cxy obtained          *
    55  *        previously,                                                                         *
    56  *      + It jumps to the boot_loader() function defined in the 'boot.c' file and passes 2    *
    57  *        arguments which are the cxy and lid of each core to this function.                  *
     38 * It does 3 things:                                                                          *
     39 * - It initializes the stack pointer depending on the lid extracted from the gid,            *
     40 *   using the BOOT_STACK_BASE and BOOT_STACK_SIZE parameters defined in the                  *
     41 *   'boot_config.h' file,                                                                    *
     42 * - It changes the value of the DATA address extension register using the cxy extracted      *
     43 *   from the gid,                                                                            *
     44 * - It jumps to the boot_loader() function defined in the 'boot.c' file, passing the two     *
     45 *   arguments (cxy and lid).                                                                 *
    5846 *********************************************************************************************/
    5947
     
    7260boot_entry:
    7361
    74 #if USE_FIXED_FORMAT
    75 
    76 /*************
    77  * VERSION 1 *
    78  *************/
    79 
    80     /*
    81      * Get (cxy, lid) values from gid contained in coprocessor0 register.
    82      */
     62    /* Get (cxy, lid) values from gid contained in CP0 register  */
    8363
    8464    mfc0    k0,     CP0_PROCID         
     
    8767    srl     t2,     k0,     P_WIDTH                     /* t2 <= cxy                        */
    8868   
    89     /* Initialize stack pointer from previously retrieved lid value  */
     69    /* Initialize stack pointer from lid value  */
    9070   
    9171    la      t0,     BOOT_STACK_BASE                     /* t0 <= BOOT_STACK_BASE            */
     
    9373    multu   k1,     t1
    9474    mflo    k0                                          /* k0 <= BOOT_STACK_SIZE * lid      */
    95     subu    sp,     t0,     k0                          /* P[cxy,lid] stack top initialized */
     75    subu    sp,     t0,     k0                          /* P[cxy,lid] sp initialized        */
    9676
    97     /* Switch to local DSPACE by changing the value of the address extension registers  */
     77    /* Switch to local DSPACE by changing the value of the address extension register       */
    9878
    9979    mtc2    t2,     CP2_DATA_PADDR_EXT
    10080
    101     /* Jump to boot_loader() function after passing 2 arguments in the registers  */
     81    /* Jump to boot_loader() function after passing (cxy,lid) arguments in the registers    */
    10282
    10383    or      a0,     zero,   t1                          /* a0 <= lid                        */     
     
    10787    nop
    10888
    109 #else
    110 
    111 /*************
    112  * VERSION 2 *
    113  *************/
    114 
    115     /* Test if this is bscpu  */
    116 
    117     mfc0    k0,     CP0_PROCID         
    118     andi    k0,     k0,     0xFFF                       /* k0 <= gid                        */
    119 
    120     li      t1,     BOOT_CORE_GID                       /* t1 <= bscpu gid                  */
    121     or      t3,     zero,   zero                        /* t3 <= bscpu lid = 0              */
    122     beq     k0,     t1,     bscpu_exit                  /* if bscpu, skip scanning core tbl */
    123     li      t4,     BOOT_CORE_CXY                       /* t4 <= bscpu cxy                  */
    124 
    125     /* Get base address of the core descriptor table in 'arch_info.bin' file */
    126 
    127     la      t0,     ARCHINFO_BASE                       /* t0 <= ARCHINFO_BASE              */
    128     li      t1,     0x80                                /* t1 <= ARCHINFO_HEADER_SIZE       */
    129     addu    t2,     t0,     t1                          /* t2 <= ARCHINFO_CORE_BASE         */
    130 
    131     /* scan the core descriptor table if this is not bscpu. TODO If not found?  */
    132 
    133     li      t3,     0x8                                 /* t3 <= ARCHINFO_CORE_SIZE         */
    134    
    135 scanning_core_table:
    136     lw      t1,     0(t2)                               /* t1 <= archinfo_core.gid          */
    137     bne     t1,     k0,     scanning_core_table         /* if (t1 != k0) => loop            */
    138     addu    t2,     t2,     t3                          /* t2 <= @ next archinfo_core       */
    139 
    140     /* Get (cxy, lid) values from the found core descriptor  */
    141    
    142     lw      t3,     -8(t2)                              /* t3 <= lid                        */
    143     lw      t4,     -4(t2)                              /* t4 <= cxy                        */
    144 
    145     /* Initialize stack pointer from previously retrieved lid value  */
    146 
    147 bscpu_exit:   
    148     la      t0,     BOOT_STACK_BASE                     /* t0 <= BOOT_STACK_BASE            */
    149     li      k1,     BOOT_STACK_SIZE                     /* k1 <= BOOT_STACK_SIZE            */
    150     multu   k1,     t3
    151     mflo    k0                                          /* k0 <= BOOT_STACK_SIZE * lid      */
    152     subu    sp,     t0,     k0                          /* P[cxy,lid] stack top initialized */
    153 
    154     /* Switch to local DSPACE by changing the value of the address extension registers  */
    155 
    156     mtc2    t4,     CP2_DATA_PADDR_EXT
    157 
    158     /* Jumping to boot_loader() function after passing 2 arguments in registers */
    159 
    160     or      a0,     zero,   t3                          /* a0 <= lid                        */     
    161     or      a1,     zero,   t4                          /* a1 <= cxy                        */
    162     la      ra,     boot_loader
    163     jr      ra
    164     nop
    165 
    166 #endif
    167 
    16889    .end boot_entry
    16990
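The header comment above states the fixed gid format (((x << Y_WIDTH) + y) << P_WIDTH) + lid, and the assembly derives cxy with a srl by P_WIDTH and the stack pointer as BOOT_STACK_BASE - lid * BOOT_STACK_SIZE. The C transcription below is for readability only; the P_WIDTH and stack constants are placeholders (the real values live in hard_config.h and boot_config.h), and the lid mask mirrors instructions that are elided from the hunk.

    #include <stdint.h>

    /* placeholder values: the real ones come from hard_config.h / boot_config.h */
    #define P_WIDTH          2
    #define BOOT_STACK_BASE  0x00200000
    #define BOOT_STACK_SIZE  0x00001000

    /* decode (cxy,lid) from the gid and compute the private boot stack pointer,
       mirroring the srl / multu / subu sequence of boot_entry.S                 */
    static inline void boot_decode_gid( uint32_t   gid,
                                        uint32_t * cxy,
                                        uint32_t * lid,
                                        uint32_t * sp )
    {
        *lid = gid & ((1 << P_WIDTH) - 1);                   /* local core index             */
        *cxy = gid >> P_WIDTH;                               /* cluster id ((x<<Y_WIDTH)+y)  */
        *sp  = BOOT_STACK_BASE - (*lid) * BOOT_STACK_SIZE;   /* one stack per local core     */
    }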
  • trunk/hal/generic/hal_gpt.h

    r587 r623  
    7777/****************************************************************************************
    7878 * This function allocates physical memory for first level page table (PT1),
    79  * and initializes the page table descriptor.
     79 * and initializes the GPT descriptor, creating an empty GPT.
    8080 ****************************************************************************************
    8181 * @ gpt     : pointer on generic page table descriptor.
     
    126126
    127127/****************************************************************************************
    128  * This function map a - local or remote - GPT entry identified by its VPN, from values
    129  * defined by the <ppn> and <attr> arguments. It allocates physical memory in remote
    130  * cluster for the GPT PT2, using a RPC_PMEM_GET_PAGES, if required.
    131  ****************************************************************************************
    132  * @ gpt       : [in] pointer on the page table
     128 * This function maps in a - local or remote - GPT identified by the <gpt_xp> argument
     129 * an entry identified by the <vpn> argument, as defined by <ppn> and <attr> arguments.
     130 * It allocates physical memory for the GPT PT2, using a RPC_PMEM_GET_PAGES if required.
     131 ****************************************************************************************
      132 * @ gpt_xp    : [in] extended pointer on the page table
    133133 * @ vpn       : [in] virtual page number
    134134 * @ attr      : [in] generic attributes
     
    154154/****************************************************************************************
    155155 * This function returns in the <attr> and <ppn> arguments the current values stored
    156  * in a -local or remote - GPT entry, identified by the <gpt> and <vpn> arguments.
     156 * in a - local or remote - GPT entry, identified by the <gpt> and <vpn> arguments.
    157157 ****************************************************************************************
    158158 * @ gpt_xp    : [in]  extended pointer on the page table
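To make the reworded hal_gpt_set_pte() / hal_gpt_get_pte() descriptions above concrete, here is a hedged usage sketch. The argument shapes follow the prototypes and call sites visible elsewhere in this changeset, and the GPT_* attribute flags appear in the code removed from hal_vmm.c, but the helper itself (map_and_check) and its error handling are illustrative only.

    /* illustrative helper: map one entry of a (possibly remote) GPT, then read it back */
    error_t map_and_check( cxy_t cxy, gpt_t * gpt, vpn_t vpn, uint32_t ppn )
    {
        uint32_t attr;
        uint32_t cur_ppn;
        error_t  error;

        /* map <vpn> on <ppn> with generic attributes (flags from the removed hal_vmm.c code) */
        error = hal_gpt_set_pte( XPTR( cxy , gpt ), vpn,
                                 GPT_MAPPED | GPT_SMALL | GPT_CACHABLE, ppn );
        if( error ) return error;

        /* read the entry back through the same extended pointer */
        hal_gpt_get_pte( XPTR( cxy , gpt ), vpn, &attr, &cur_ppn );

        return (cur_ppn == ppn) ? 0 : -1;
    }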
  • trunk/hal/generic/hal_special.h

    r619 r623  
    3131
    3232struct thread_s;
     33struct gpt_s;
    3334
    3435///////////////////////////////////////////////////////////////////////////////////////////
     
    3738// ALMOS-MKH uses the following API to access the core protected registers.
    3839///////////////////////////////////////////////////////////////////////////////////////////
     40
     41/*****************************************************************************************
      42 * This function initialises - for architectures requiring it - the protected register(s)
      43 * containing the kernel entry address(es) for interrupts / exceptions / syscalls.
     44 ****************************************************************************************/
     45void hal_set_kentry( void );
     46
     47/*****************************************************************************************
     48 * This function initializes - for architectures requiring it - the MMU registers
     49 * as required by the target architecture to execute the kernel threads attached
     50 * to kernel process zero. It is called by all cores in the kernel_init() function.
     51 *****************************************************************************************
     52 * @ gpt :  local pointer on the kernel page table descriptor.
     53 ****************************************************************************************/
     54void hal_mmu_init( struct gpt_s * gpt );
    3955
    4056/*****************************************************************************************
     
    103119/*****************************************************************************************
    104120 * This function makes an uncachable read to a 32 bits variable in local memory.
     121 *****************************************************************************************
    105122 * @ ptr     : pointer on the variable
    106123 * @ returns the value
     
    137154/*****************************************************************************************
    138155 * This function returns information on MMU exceptions :
     156 *****************************************************************************************
    139157 * @ mmu_ins_excp_code : [out] instruction fetch exception code
    140158 * @ mmu_ins_bad_vaddr : [out] instruction fetch faulty virtual address
  • trunk/hal/generic/hal_vmm.h

    r457 r623  
    11/*
    2  * hal_vmm.h - Generic Virtual Memory Manager initialisation
     2 * hal_vmm.h - Kernel Virtual Memory Manager initialisation
    33 *
    4  * Authors  Alain Greiner (2016,2017)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2222 */
    2323
    24 #ifndef _HAL_PPM_H_
    25 #define _HAL_PPM_H_
     24#ifndef _HAL_VMM_H_
     25#define _HAL_VMM_H_
    2626
    2727#include <hal_kernel_types.h>
     
    2929
    3030/////////////////////////////////////////////////////////////////////////////////////////
    31 //    Generic Virtual Memory Manager initialisation (implementation in hal_vmm.c)
     31//    Kernel Virtual Memory Manager initialisation (implementation in hal_vmm.c)
    3232//
    3333// Any arch-specific implementation must implement this API.
     
    3636/****  Forward declarations  ****/
    3737
    38 struct vmm_s;
     38struct process_s;
     39struct boot_info_s;
    3940
    4041/****************************************************************************************
    41  * This function makes all architecture specific initialisations
    42  * in the VSL (Virtual segments List) and in the GPT (Generic Page Table).
     42 * Depending on the hardware architecture, this function creates (i.e. allocates memory
     43 * and initializes) the VSL (Virtual segments List) and the GPT (Generic Page Table),
     44 * for all vsegs required by the kernel process.
    4345 ****************************************************************************************
    44  * @ vmm   : pointer on virtual memory manager.
     46 * @ info  : local pointer on boot_info (for kernel segments base & size).
    4547 * @ return 0 if success / return ENOMEM if failure.
    4648 ***************************************************************************************/
    47 error_t hal_vmm_init( struct vmm_s * vmm );
     49error_t hal_vmm_kernel_init( struct boot_info_s * info );
    4850
    49 #endif  /* HAL_PPM_H_ */
     51/****************************************************************************************
      52 * Depending on the hardware architecture, this function updates the VMM of a user
     53 * process identified by the <process> argument. It registers in VSL and GPT all
     54 * kernel vsegs required by this architecture.
     55 ****************************************************************************************
     56 * @ process   : local pointer on user process descriptor.
     57 * @ return 0 if success / return ENOMEM if failure.
     58 ***************************************************************************************/
     59error_t hal_vmm_kernel_update( struct process_s * process );
     60
     61#endif  /* HAL_VMM_H_ */
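According to the TSAR implementation comments later in this changeset, hal_vmm_kernel_init() is called by process_zero_init() during kernel_init, and hal_vmm_kernel_update() by the generic vmm_init() when a user process VMM is built. The call-site sketch below is assumed; only the prototypes, the caller names, and the printk / hal_core_sleep error style come from the changeset.

    /* in process_zero_init(), once per cluster (sketch) */
    if( hal_vmm_kernel_init( info ) )
    {
        printk("\n[PANIC] in %s : cannot create kernel VSL/GPT\n", __FUNCTION__ );
        hal_core_sleep();
    }

    /* in vmm_init(), for each new user process (sketch) */
    if( hal_vmm_kernel_update( process ) )
    {
        printk("\n[ERROR] in %s : cannot register kernel vsegs\n", __FUNCTION__ );
        return -1;
    }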
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r611 r623  
    141141#endif
    142142
    143     // check page size
    144     assert( (CONFIG_PPM_PAGE_SIZE == 4096) ,
    145     "for TSAR, the page size must be 4 Kbytes\n" );
     143// check page size
     144assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "for TSAR, the page size must be 4 Kbytes\n" );
    146145
    147146    // allocates 2 physical pages for PT1
     
    287286    vpn_t      vpn;
    288287
    289     assert( (process != NULL) , "NULL process pointer\n");
     288// check argument
     289assert( (process != NULL) , "NULL process pointer\n");
    290290
    291291    // get pointer on gpt
     
    295295    pt1 = (uint32_t *)gpt->ptr;
    296296
    297     printk("\n***** Generic Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
     297    printk("\n***** Tsar Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
    298298    process->pid , gpt , pt1 );
    299299
     
    334334
    335335
     336/////////////////////////////////////////////////////////////////////////////////////
      337// For the TSAR architecture, this function allocates a first level PT1 (8 Kbytes),
      338// and maps a single big page for the kernel code segment in slot[0].
     339/////////////////////////////////////////////////////////////////////////////////////
     340void hal_gpt_build_kpt( cxy_t   cxy,
     341                        gpt_t * gpt )
     342{
     343    error_t error;
     344
     345    // allocate memory for one gpt
     346    error = hal_gpt_create( gpt );
     347
     348    if( error )
     349    {
     350        printk("\n[PANIC] in %s : cannot allocate kernel GPT in cluster %x\n",
     351        __FUNCTION__ , cxy );
     352        hal_core_sleep();
     353    }
     354
     355    // compute attr and ppn for one PTE1
     356    uint32_t attr  = 0xCA800000;           // bits : V,T,C,X,G
     357    uint32_t ppn   = (cxy << 20) >> 9;
     358
     359    // set PTE1
     360    error = hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn );
     361
     362    if( error )
     363    {
     364        printk("\n[PANIC] in %s : cannot initialize kernel GPT in cluster %x\n",
     365        __FUNCTION__ , cxy );
     366        hal_core_sleep();
     367    }
     368}
     369
    336370//////////////////////////////////////////
    337371error_t hal_gpt_set_pte( xptr_t    gpt_xp,
     
    390424        if( small == 0 )     // map a big page in PT1
    391425    {
    392         assert( (pte1 == 0) ,
    393                 "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
    394      
     426
     427// check PT1 entry not mapped
     428assert( (pte1 == 0) , "try to set a big page in a mapped PT1 entry\n" );
     429
     430// check VPN aligned
     431assert( (ix2 == 0) , "illegal vpn for a big page\n" );
     432
     433// check PPN aligned
     434assert( ((ppn & 0x1FF) == 0) , "illegal ppn for a big page\n" );
     435
    395436        // set the PTE1 value in PT1
    396437        pte1 = (tsar_attr  & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
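The new checks added to hal_gpt_set_pte() above enforce that a big page is 512-pages aligned on both sides: with the TSAR two-level layout (2048-entry PT1, 512-entry PT2) a PTE1 covers 512 x 4 Kbytes = 2 Mbytes, so ix2 (the low 9 bits of the vpn) and the low 9 bits of the ppn must both be zero. The tiny standalone illustration below uses arbitrary vpn/ppn values.

    #include <assert.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t vpn = 0x00000200;      /* 512  : start of the second big page -> aligned */
        uint32_t ppn = 0x00001000;      /* 4096 : multiple of 512              -> aligned */

        uint32_t ix2 = vpn & 0x1FF;     /* PT2 index : must be 0 for a big page           */

        assert( ix2 == 0 );             /* same condition as the new check in the diff    */
        assert( (ppn & 0x1FF) == 0 );   /* ppn alignment check, as in the diff            */

        return 0;
    }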
  • trunk/hal/tsar_mips32/core/hal_special.c

    r619 r623  
    3333struct thread_s;
    3434
     35
     36//////////////////////////////////////////////////////////////////////////////////
     37//   Extern global variables
     38//////////////////////////////////////////////////////////////////////////////////
     39
     40extern cxy_t local_cxy;
     41extern void  hal_kentry_enter( void );
     42
     43/////////////////////////////////////////////////////////////////////////////////
      44// For the TSAR architecture, this function registers the physical address of
      45// the first level page table (PT1) in the PTPR register.
      46// It activates the instruction MMU, and de-activates the data MMU.
     47/////////////////////////////////////////////////////////////////////////////////
     48void hal_mmu_init( gpt_t * gpt )
     49{
     50
     51    // set PT1 base address in mmu_ptpr register
     52    uint32_t ptpr = (((uint32_t)gpt->ptr) >> 13) | (local_cxy << 19);
     53    asm volatile ( "mtc2   %0,   $0         \n" : : "r" (ptpr) );
     54
     55    // set ITLB | ICACHE | DCACHE bits in mmu_mode register
     56    asm volatile ( "ori    $26,  $0,  0xB   \n"
     57                   "mtc2   $26,  $1         \n" );
     58}
     59
     60////////////////////////////////////////////////////////////////////////////////
     61// For the TSAR architecture, this function registers the address of the
     62// hal_kentry_enter() function in the MIPS32 cp0_ebase register.
     63////////////////////////////////////////////////////////////////////////////////
     64void hal_set_kentry( void )
     65{
     66    uint32_t kentry = (uint32_t)(&hal_kentry_enter);
     67
     68    asm volatile("mtc0   %0,  $15,  1" : : "r" (kentry) );
     69}
     70
    3571////////////////////////////////
    3672inline gid_t hal_get_gid( void )
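hal_mmu_init() above packs the mmu_ptpr value as (PT1_base >> 13) | (local_cxy << 19), i.e. the 8 Kbytes-aligned PT1 base plus the cluster identifier in the upper bits. The worked example below reproduces that packing; the base address and cluster value are arbitrary, not taken from the changeset.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t pt1_base  = 0x00004000;    /* example PT1 base, 8 Kbytes aligned */
        uint32_t local_cxy = 0x5;           /* example cluster identifier         */

        /* same packing as hal_mmu_init() in the hunk above */
        uint32_t ptpr = (pt1_base >> 13) | (local_cxy << 19);

        printf("mmu_ptpr = %08x\n", (unsigned)ptpr);   /* 00280002 for these values */
        return 0;
    }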
  • trunk/hal/tsar_mips32/core/hal_vmm.c

    r587 r623  
    22 * hal_vmm.c - Virtual Memory Manager Initialisation for TSAR
    33 *
    4  * Authors  Alain Greiner (2016,2017)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2626#include <hal_vmm.h>
    2727#include <hal_gpt.h>
     28#include <process.h>
    2829#include <vseg.h>
    2930#include <xlist.h>
     
    3233
    3334//////////////////////////////////////////////////////////////////////////////////////////
    34 // This file contains the TSAR specific code to initialize the Virtual Memory Manager.
    35 // The "kentry" vseg contains the kernel code executed when a core enter/exit the kernel,
    36 // in case of Interrupt, Exception, or Syscall.
    37 // For the TSAR architecture, the kernel uses physical addresses, and this code must be
    38 // identity mapped. The following function is called by the generic vmm_init() function
    39 // and identity map all pages of the "kentry" vseg.
    40 // We dont take the locks protecting the VSL and the GPT, because there is no concurrent
    41 // accesses to VMM during VMM initialization.
     35// This file contains the TSAR specific code used to initialize the kernel process VMM,
      36// or to update a user process VMM with information related to the kernel vsegs.
      37// As the TSAR architecture does not use the DATA MMU, but only uses the DATA extension
     38// address register to access local and remote kernel data, the kernel VSL contains only
     39// one "kcode" segment, and the kernel GPT contains only one big page in PT1[0] slot.
    4240//////////////////////////////////////////////////////////////////////////////////////////
    4341
    44 ////////////////////////////////////
    45 error_t  hal_vmm_init( vmm_t * vmm )
     42// extern global variables
     43extern process_t process_zero;
     44
     45//////////////////////////////////////////////////////////////////////////////////////////
     46// This function is called by the process_zero_init() function during kernel_init.
      47// It initializes the VMM of the kernel process_zero (containing all kernel threads)
     48// in the local cluster.
     49//////////////////////////////////////////////////////////////////////////////////////////
     50error_t  hal_vmm_kernel_init( boot_info_t * info )
    4651{
    47     error_t error;
     52    error_t   error;
    4853
    49     // map all pages of "kentry" vseg
    50     uint32_t vpn;
    51     uint32_t attr;
    52     attr = GPT_MAPPED | GPT_SMALL | GPT_EXECUTABLE | GPT_CACHABLE | GPT_GLOBAL;
    53     for( vpn = CONFIG_VMM_KENTRY_BASE;
    54          vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ )
     54    // get pointer on kernel GPT
     55    gpt_t * gpt = &process_zero.vmm.gpt;
     56
     57    // get cluster identifier
     58    cxy_t cxy = local_cxy;
     59
     60    // allocate memory for kernel GPT
     61    error = hal_gpt_create( gpt );
     62
     63    if( error )
    5564    {
    56         error = hal_gpt_set_pte( XPTR( local_cxy , &vmm->gpt ),
    57                                  vpn,
    58                                  attr,
    59                                  (local_cxy<<20) | (vpn & 0xFFFFF) );
    60 
    61         if( error ) return error;
     65        printk("\n[PANIC] in %s : cannot allocate kernel GPT in cluster %x\n",
     66        __FUNCTION__ , cxy );
     67        hal_core_sleep();
    6268    }
    6369
    64     // scan the VSL to found the "kentry" vseg
    65     xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    66     xptr_t         iter_xp;
    67     xptr_t         vseg_xp;
    68     vseg_t       * vseg;
    69     bool_t         found = false;
    70  
    71     XLIST_FOREACH( root_xp , iter_xp )
     70    // compute attr and ppn for one PTE1
     71    uint32_t attr  = 0x8A800000;           // bits : V,C,X,G
     72    uint32_t ppn   = (cxy << 20) >> 9;     // physical page index is 0
     73
     74    // set PTE1  in slot[0]
     75    error = hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn );
     76
     77    if( error )
    7278    {
    73         vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    74         vseg    = (vseg_t *)GET_PTR( vseg_xp );
    75 
    76         // set the IDENT flag in "kentry" vseg descriptor
    77         if( vseg->vpn_base == CONFIG_VMM_KENTRY_BASE )
    78         {
    79             vseg->flags |= VSEG_IDENT;
    80             found = true;
    81             break;
    82         }
     79        printk("\n[PANIC] in %s : cannot initialize kernel GPT in cluster %x\n",
     80        __FUNCTION__ , cxy );
     81        hal_core_sleep();
    8382    }
    8483
    85     if( found == false ) return 0XFFFFFFFF;
    86 
    87     return 0;
     84    // create kcode vseg and register it in kernel VSL
     85    vseg_t * vseg = vmm_create_vseg( &process_zero,
     86                                     VSEG_TYPE_CODE,
     87                                     info->kcode_base,
     88                                     info->kcode_size,
      89                                     0, 0,                  // file offset and file size (unused)
     90                                     XPTR_NULL,             // no mapper
     91                                     local_cxy );
     92    if( vseg == NULL )
     93    {
     94        printk("\n[PANIC] in %s : cannot register vseg to VSL in cluster %x\n",
     95        __FUNCTION__ , cxy );
     96        hal_core_sleep();
     97    }
    8898
    8999}  // end hal_vmm_init()
    90100
     101//////////////////////////////////////////////////////////////////////////////////////////
     102// This function is called by the vmm_init() function to update the VMM of an user
     103// process identified by the <process> argument.
     104// It registers in the user VSL the "kcode" vseg, registered in the local kernel VSL,
     105// and register in the user GPT the big page[0] mapped in the local kernel GPT.
     106//////////////////////////////////////////////////////////////////////////////////////////
     107error_t hal_vmm_kernel_update( process_t * process )
     108{
     109    error_t error;
     110    uint32_t attr;
     111    uint32_t ppn;
    91112
     113// TODO check ppn value in kernel GPT (must be 0)
     114
     115    // get cluster identifier
     116    cxy_t cxy = local_cxy;
     117
     118    // get extended pointer on user GPT
     119    xptr_t gpt_xp = XPTR( cxy , &process->vmm.gpt );
     120
     121    // get ppn and attributes from slot[0] in kernel GPT
     122    hal_gpt_get_pte( gpt_xp , 0 , &attr , &ppn );
     123
     124// check ppn and attributes
     125assert( (attr == 0x8A800000) && (ppn == ((cxy << 20) >> 9)),  __FUNCTION__,
     126"bad ppn = %x or attr = %x in slot[0] of kernel GPT\n", ppn , attr );
     127 
     128    // update user GPT : set PTE1 in slot[0]
     129    error = hal_gpt_set_pte( gpt_xp , 0 , attr , ppn );
     130
     131    if( error )
     132    {
     133        printk("\n[ERROR] in %s : cannot update GPT in cluster %x\n",
     134        __FUNCTION__ , cxy );
     135        return -1;
     136    }
     137
     138    // get pointer on the unique vseg registered in kernel VSL
     139    xptr_t root_xp = XPTR( cxy , &process_zero.vmm.vsegs_root );
     140    vseg_t * vseg = XLIST_FIRST( root_xp , vseg_t , xlist );
     141
     142// check vsegs_nr
     143assert( (process_zero.vmm.vsegs_nr == 1 ) , __FUNCTION__,
     144"bad vsegs number in kernel VSL\n" );
     145
     146    // update user VSL : register one new vseg for kcode
     147    vseg_t * new = vmm_create_vseg( process,
     148                                    vseg->type,
     149                                    vseg->min,
     150                                    vseg->max - vseg->min,
      151                                    0, 0,                  // file offset and file size (unused)
     152                                    XPTR_NULL,             // no mapper
     153                                    local_cxy );
     154    if( new == NULL )
     155    {
     156        printk("\n[ERROR] in %s : cannot update VSL in cluster %x\n",
     157        __FUNCTION__ , cxy );
     158        return -1;
     159    }
     160}
     161
     162
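In hal_vmm_kernel_init() above, the kernel PTE1 ppn is computed as (cxy << 20) >> 9: with 4 Kbytes pages the first physical page of cluster cxy is cxy << 20 (the cluster identifier sits above the 32-bit local address space), and a PTE1 stores that page number divided by 512. The check below reproduces the computation; the cluster value is an arbitrary example.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t cxy  = 0x15;                     /* arbitrary example cluster id      */

        uint32_t ppn  = cxy << 20;                /* first 4 Kbytes page of the cluster */
        uint32_t ppn1 = ppn >> 9;                 /* big-page index stored in the PTE1  */

        /* prints a800 : identical to (cxy << 20) >> 9 used in the diff */
        printf("ppn1 = %x\n", (unsigned)ppn1);
        return 0;
    }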
  • trunk/hal/tsar_mips32/kernel.ld

    r570 r623  
    44 * loadable segments, that MUST be identity mapped for the TSAR architecture.
    55 *
    6  * WARNING the seg_kentry_base and seg_kcode_base defined below must be kept coherent
     6 * WARNING : the seg_kentry_base and seg_kcode_base defined below must be coherent
    77 * with the values defined in the boot_config.h file used by the TSAR bootloader.
    88 **************************************************************************************/
  • trunk/kernel/fs/devfs.c

    r614 r623  
    33 *
    44 * Author   Mohamed Lamine Karaoui (2014,2015)
    5  *          Alain Greiner (2016,2017)
     5 *          Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) Sorbonne Universites
     
    9191                        xptr_t * devfs_external_inode_xp )
    9292{
    93     error_t  error;
    94     xptr_t   unused_xp;   // required by vfs_add_child_in_parent()
     93    error_t       error;
     94    xptr_t        unused_xp;   // required by vfs_add_child_in_parent()
     95    vfs_inode_t * inode;
    9596
    9697    // create DEVFS "dev" inode in cluster 0
    9798    error = vfs_add_child_in_parent( 0,                // cxy
    98                                      INODE_TYPE_DIR,
    9999                                     FS_TYPE_DEVFS,
    100100                                     root_inode_xp,
     
    103103                                     devfs_dev_inode_xp );
    104104
     105    // update inode "type" field
     106    inode = GET_PTR( *devfs_dev_inode_xp );
     107    inode->type = INODE_TYPE_DIR;
     108 
    105109    // create dentries <.> and <..> in <dev>
    106110    error |= vfs_add_special_dentries( *devfs_dev_inode_xp,
    107111                                       root_inode_xp );
    108112
    109 // check success
    110 assert( (error == 0) , "cannot create <dev>\n" );
     113    if( error )
     114    {
     115        printk("\n[PANIC] in %s : cannot create <dev> directory\n", __FUNCTION__ );
     116        hal_core_sleep();
     117    }
    111118
    112119#if DEBUG_DEVFS_GLOBAL_INIT
     
    120127    // create DEVFS "external" inode in cluster 0
    121128    error = vfs_add_child_in_parent( 0,               // cxy
    122                                      INODE_TYPE_DIR,
    123129                                     FS_TYPE_DEVFS,
    124130                                     *devfs_dev_inode_xp,
     
    127133                                     devfs_external_inode_xp );
    128134
     135    // update inode "type" field
     136    inode = GET_PTR( *devfs_external_inode_xp );
     137    inode->type = INODE_TYPE_DIR;
     138 
    129139    // create dentries <.> and <..> in <external>
    130140    error |= vfs_add_special_dentries( *devfs_external_inode_xp,
    131141                                       *devfs_dev_inode_xp );
    132142
    133 // check success
    134 assert( (error == 0) , "cannot create <external>\n" );
     143    if( error )
     144    {
     145        printk("\n[PANIC] in %s : cannot create <external> directory\n", __FUNCTION__ );
     146        hal_core_sleep();
     147    }
    135148
    136149#if DEBUG_DEVFS_GLOBAL_INIT
     
    153166    chdev_t     * chdev_ptr;
    154167    xptr_t        inode_xp;
    155     cxy_t         inode_cxy;
    156168    vfs_inode_t * inode_ptr;
    157169    uint32_t      channel;
     
    171183
    172184    error = vfs_add_child_in_parent( local_cxy,
    173                                      INODE_TYPE_DIR,
    174185                                     FS_TYPE_DEVFS,
    175186                                     devfs_dev_inode_xp,
     
    178189                                     devfs_internal_inode_xp );
    179190
     191    // set inode "type" field
     192    inode_ptr = GET_PTR( *devfs_internal_inode_xp );
     193    inode_ptr->type = INODE_TYPE_DEV;
     194 
    180195    // create dentries <.> and <..> in <internal>
    181196    error |= vfs_add_special_dentries( *devfs_internal_inode_xp,
    182197                                       devfs_dev_inode_xp );
    183198
    184 // check success
    185 assert( (error == 0) , "cannot create <external>\n" );
     199    if( error )
     200    {
     201        printk("\n[PANIC] in %s : cannot create <internal> directory\n", __FUNCTION__ );
     202        hal_core_sleep();
     203    }
    186204
    187205#if DEBUG_DEVFS_LOCAL_INIT
     
    199217        chdev_cxy = GET_CXY( chdev_xp );
    200218
    201 assert( (chdev_cxy == local_cxy ), "illegal MMC chdev in cluster %x\n", local_cxy );
     219        if( chdev_cxy != local_cxy )
     220        {
     221            printk("\n[PANIC] in %s : illegal MMC chdev in cluster %x\n",
     222            __FUNCTION__, local_cxy );
     223            hal_core_sleep();
     224        }
    202225
    203226        error = vfs_add_child_in_parent( local_cxy,
    204                                          INODE_TYPE_DEV,
    205227                                         FS_TYPE_DEVFS,
    206228                                         *devfs_internal_inode_xp,
     
    209231                                         &inode_xp );
    210232
    211 assert( (error == 0) , "cannot create MMC inode\n" );
    212 
    213         // update child inode "extend" field
    214         inode_cxy = GET_CXY( inode_xp );
     233        if( error )
     234        {
     235            printk("\n[PANIC] in %s : cannot create MMC inode in cluster %x\n",
     236            __FUNCTION__, local_cxy );
     237            hal_core_sleep();
     238        }
     239
     240        // update child inode "extend" and "type" fields
    215241        inode_ptr = GET_PTR( inode_xp );
    216         hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     242        inode_ptr->extend = chdev_ptr;
     243        inode_ptr->type   = INODE_TYPE_DEV;
    217244       
    218245#if DEBUG_DEVFS_LOCAL_INIT
     
    234261            chdev_cxy = GET_CXY( chdev_xp );
    235262
    236 assert( (chdev_cxy == local_cxy ), "illegal DMA chdev in cluster %x\n", local_cxy );
     263            if( chdev_cxy != local_cxy )
     264            {
      265                printk("\n[PANIC] in %s : illegal DMA chdev in cluster %x\n",
     266                __FUNCTION__, local_cxy );
     267                hal_core_sleep();
     268            }
    237269
    238270            error = vfs_add_child_in_parent( local_cxy,
    239                                              INODE_TYPE_DEV,
    240271                                             FS_TYPE_DEVFS,
    241272                                             *devfs_internal_inode_xp,
     
    243274                                             &unused_xp,
    244275                                             &inode_xp );
    245 
    246 assert( (error == 0) , "cannot create DMA inode\n" );
    247 
    248             // update child inode "extend" field
    249             inode_cxy = GET_CXY( inode_xp );
     276            if( error )
     277            {
     278                printk("\n[PANIC] in %s : cannot create DMA inode in cluster %x\n",
     279                __FUNCTION__, local_cxy );
     280                hal_core_sleep();
     281            }
     282
     283            // update child inode "extend" and "type" fields
    250284            inode_ptr = GET_PTR( inode_xp );
    251             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     285            inode_ptr->extend = chdev_ptr;
     286            inode_ptr->type   = INODE_TYPE_DEV;
    252287       
    253288#if DEBUG_DEVFS_LOCAL_INIT
     
    270305        {
    271306            error = vfs_add_child_in_parent( local_cxy,
    272                                              INODE_TYPE_DEV,
    273307                                             FS_TYPE_DEVFS,
    274308                                             devfs_external_inode_xp,
     
    276310                                             &unused_xp,
    277311                                             &inode_xp );
    278 
    279 assert( (error == 0) , "cannot create IOB inode\n" );
    280 
    281             // update child inode "extend" field
    282             inode_cxy = GET_CXY( inode_xp );
     312            if( error )
     313            {
     314                printk("\n[PANIC] in %s : cannot create IOB inode in cluster %x\n",
     315                __FUNCTION__, local_cxy );
     316                hal_core_sleep();
     317            }
     318
     319            // update child inode "extend" and "type" fields
    283320            inode_ptr = GET_PTR( inode_xp );
    284             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     321            inode_ptr->extend = chdev_ptr;
     322            inode_ptr->type   = INODE_TYPE_DEV;
    285323       
    286324#if DEBUG_DEVFS_LOCAL_INIT
     
    303341        {
    304342            error = vfs_add_child_in_parent( local_cxy,
    305                                              INODE_TYPE_DEV,
    306343                                             FS_TYPE_DEVFS,
    307344                                             devfs_external_inode_xp,
     
    310347                                             &inode_xp );
    311348
    312 assert( (error == 0) , "cannot create PIC inode\n" );
     349            if( error )
     350            {
     351                printk("\n[PANIC] in %s : cannot create PIC inode in cluster %x\n",
     352                __FUNCTION__, local_cxy );
     353                hal_core_sleep();
     354            }
    313355
    314356            // update child inode "extend" field
    315             inode_cxy = GET_CXY( inode_xp );
    316357            inode_ptr = GET_PTR( inode_xp );
    317             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     358            inode_ptr->extend = chdev_ptr;
     359            inode_ptr->type   = INODE_TYPE_DEV;
    318360       
    319361#if DEBUG_DEVFS_LOCAL_INIT
     
    338380            {
    339381                error = vfs_add_child_in_parent( local_cxy,
    340                                                  INODE_TYPE_DEV,
    341382                                                 FS_TYPE_DEVFS,
    342383                                                 devfs_external_inode_xp,
     
    345386                                                 &inode_xp );
    346387
    347 assert( (error == 0) , "cannot create TXT_RX inode\n" );
    348 
    349                 // update child inode "extend" field
    350                 inode_cxy = GET_CXY( inode_xp );
     388                if( error )
     389                {
     390                    printk("\n[PANIC] in %s : cannot create TXT_RX inode in cluster %x\n",
     391                    __FUNCTION__, local_cxy );
     392                    hal_core_sleep();
     393                }
     394
     395                // update child inode "extend" and "type" fields
    351396                inode_ptr = GET_PTR( inode_xp );
    352                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     397                inode_ptr->extend = chdev_ptr;
     398                inode_ptr->type   = INODE_TYPE_DEV;
    353399       
    354400#if DEBUG_DEVFS_LOCAL_INIT
     
    374420            {
    375421                error = vfs_add_child_in_parent( local_cxy,
    376                                                  INODE_TYPE_DEV,
    377422                                                 FS_TYPE_DEVFS,
    378423                                                 devfs_external_inode_xp,
     
    380425                                                 &unused_xp,
    381426                                                 &inode_xp );
    382 
    383 assert( (error == 0) , "cannot create TXT_TX inode\n" );
    384 
    385                 // update child inode "extend" field
    386                 inode_cxy = GET_CXY( inode_xp );
     427                if( error )
     428                {
     429                    printk("\n[PANIC] in %s : cannot create TXT_TX inode in cluster %x\n",
     430                    __FUNCTION__, local_cxy );
     431                    hal_core_sleep();
     432                }
     433
     434                // update child inode "extend" and "type" fields
    387435                inode_ptr = GET_PTR( inode_xp );
    388                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     436                inode_ptr->extend = chdev_ptr;
     437                inode_ptr->type   = INODE_TYPE_DEV;
    389438       
    390439#if DEBUG_DEVFS_LOCAL_INIT
     
    410459            {
    411460                error = vfs_add_child_in_parent( local_cxy,
    412                                                  INODE_TYPE_DEV,
    413461                                                 FS_TYPE_DEVFS,
    414462                                                 devfs_external_inode_xp,
     
    416464                                                 &unused_xp,
    417465                                                 &inode_xp );
    418 
    419 assert( (error == 0) , "cannot create IOC inode\n" );
    420 
    421                 // update child inode "extend" field
    422                 inode_cxy = GET_CXY( inode_xp );
     466                if( error )
     467                {
     468                    printk("\n[PANIC] in %s : cannot create IOC inode in cluster %x\n",
     469                    __FUNCTION__, local_cxy );
     470                    hal_core_sleep();
     471                }
     472
     473                // update child inode "extend" and "type" fields
    423474                inode_ptr = GET_PTR( inode_xp );
    424                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     475                inode_ptr->extend = chdev_ptr;
     476                inode_ptr->type   = INODE_TYPE_DEV;
    425477       
    426478#if DEBUG_DEVFS_LOCAL_INIT
     
    446498            {
    447499                error = vfs_add_child_in_parent( local_cxy,
    448                                                  INODE_TYPE_DEV,
    449500                                                 FS_TYPE_DEVFS,
    450501                                                 devfs_external_inode_xp,
     
    452503                                                 &unused_xp,
    453504                                                 &inode_xp );
    454 
    455 assert( (error == 0) , "cannot create FBF inode\n" );
    456 
    457                 // update child inode "extend" field
    458                 inode_cxy = GET_CXY( inode_xp );
     505                if( error )
     506                {
     507                    printk("\n[PANIC] in %s : cannot create FBF inode in cluster %x\n",
     508                    __FUNCTION__, local_cxy );
     509                    hal_core_sleep();
     510                }
     511
     512                // update child inode "extend" and "type" fields
    459513                inode_ptr = GET_PTR( inode_xp );
    460                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     514                inode_ptr->extend = chdev_ptr;
     515                inode_ptr->type   = INODE_TYPE_DEV;
    461516       
    462517#if DEBUG_DEVFS_LOCAL_INIT
     
    482537            {
    483538                error = vfs_add_child_in_parent( local_cxy,
    484                                                  INODE_TYPE_DEV,
    485539                                                 FS_TYPE_DEVFS,
    486540                                                 devfs_external_inode_xp,
     
    488542                                                 &unused_xp,
    489543                                                 &inode_xp );
    490 
    491 assert( (error == 0) , "cannot create NIC_RX inode\n" );
    492 
    493                 // update child inode "extend" field
    494                 inode_cxy = GET_CXY( inode_xp );
     544                if( error )
     545                {
     546                    printk("\n[PANIC] in %s : cannot create NIC_RX inode in cluster %x\n",
     547                    __FUNCTION__, local_cxy );
     548                    hal_core_sleep();
     549                }
     550
     551                // update child inode "extend" and "type" fields
    495552                inode_ptr = GET_PTR( inode_xp );
    496                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     553                inode_ptr->extend = chdev_ptr;
     554                inode_ptr->type   = INODE_TYPE_DEV;
    497555 
    498556#if DEBUG_DEVFS_LOCAL_INIT
     
    518576            {
    519577                error = vfs_add_child_in_parent( local_cxy,
    520                                                  INODE_TYPE_DEV,
    521578                                                 FS_TYPE_DEVFS,
    522579                                                 devfs_external_inode_xp,
     
    524581                                                 &unused_xp,
    525582                                                 &inode_xp );
    526 
    527 assert( (error == 0) , "cannot create NIC_TX inode\n" );
    528 
    529                 // update child inode "extend" field
    530                 inode_cxy = GET_CXY( inode_xp );
     583                if( error )
     584                {
     585                    printk("\n[PANIC] in %s : cannot create NIC_TX inode in cluster %x\n",
     586                    __FUNCTION__, local_cxy );
     587                    hal_core_sleep();
     588                }
     589
     590                // update child inode "extend" and "type" fields
    531591                inode_ptr = GET_PTR( inode_xp );
    532                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     592                inode_ptr->extend = chdev_ptr;
     593                inode_ptr->type   = INODE_TYPE_DEV;
    533594       
    534595#if DEBUG_DEVFS_LOCAL_INIT
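
Every hunk in this file follows the same new pattern: since vfs_add_child_in_parent() no longer receives the inode type, the caller first creates the inode/dentry couple, then writes the "type" (and "extend") fields directly in the local inode descriptor, and replaces the old assert() by an explicit printk() + hal_core_sleep() panic. A condensed sketch of that pattern is given below; the helper name devfs_create_dev_inode is hypothetical, and the code assumes the ALMOS-MKH kernel headers and the signatures visible in this changeset.

    // Condensed sketch of the creation pattern repeated in devfs_local_init().
    static void devfs_create_dev_inode( xptr_t    parent_inode_xp,   // parent directory
                                        char    * name,              // chdev name
                                        chdev_t * chdev_ptr )        // local chdev
    {
        xptr_t   unused_xp;     // dentry pointer, not used here
        xptr_t   inode_xp;
        error_t  error;

        // create the inode/dentry couple in the local cluster
        error = vfs_add_child_in_parent( local_cxy,
                                         FS_TYPE_DEVFS,
                                         parent_inode_xp,
                                         name,
                                         &unused_xp,
                                         &inode_xp );
        if( error )
        {
            printk("\n[PANIC] in %s : cannot create <%s> inode in cluster %x\n",
            __FUNCTION__, name, local_cxy );
            hal_core_sleep();
        }

        // the inode type is no longer an argument of vfs_add_child_in_parent() :
        // set the "type" and "extend" fields directly in the local inode descriptor
        vfs_inode_t * inode_ptr = GET_PTR( inode_xp );
        inode_ptr->type   = INODE_TYPE_DEV;
        inode_ptr->extend = chdev_ptr;
    }
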
  • trunk/kernel/fs/fatfs.c

    r614 r623  
    793793#if (DEBUG_FATFS_CTX_INIT & 0x1)
    794794if( DEBUG_FATFS_CTX_INIT < cycle )
    795 {
    796     uint32_t   line;
    797     uint32_t   byte = 0;
    798     printk("\n***** %s : FAT boot record\n", __FUNCTION__ );
    799     for ( line = 0 ; line < 32 ; line++ )
    800     {
    801         printk(" %X | %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x |\n",
    802                byte,
    803                buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
    804                buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
    805                buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
    806                buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
    807 
    808          byte += 16;
    809     }
    810 }
     795putb( "boot record", buffer , 256 );
    811796#endif
    812797
     
    960945assert( (inode != NULL) , "inode pointer is NULL\n" );
    961946assert( (dentry != NULL) , "dentry pointer is NULL\n" );
    962 assert( (inode->type == INODE_TYPE_DIR) , "inode is not a directory\n" );
    963947assert( (inode->mapper != NULL ) , "mapper pointer is NULL\n" );
    964948 
     
    13591343}  // end fatfs_remove_dentry
    13601344
    1361 /////////////////////////////////////////////////////
    1362 error_t fatfs_get_dentry( vfs_inode_t * parent_inode,
    1363                           char        * name,
    1364                           xptr_t        child_inode_xp )
     1345
     1346//////////////////////////////////////////////////////////////////////////////////////////////
      1347// This static function scans the pages of a mapper containing a FAT32 directory, identified
      1348// by the <mapper> argument, to find the directory entry identified by the <name> argument.
      1349// It returns a pointer on the directory entry, described as an array of 32 bytes, and the
      1350// index of this entry in the FAT32 mapper, seen as an array of 32-byte entries.
     1351// It is called by the fatfs_new_dentry() and fatfs_update_dentry() functions.
     1352// It must be called by a thread running in the cluster containing the mapper.
     1353//////////////////////////////////////////////////////////////////////////////////////////////
     1354// @ mapper    : [in]  local pointer on directory mapper.
     1355// @ name      : [in]  searched directory entry name.
     1356// @ entry     : [out] buffer for the pointer on the 32 bytes directory entry (when found).
     1357// @ index     : [out] buffer for the directory entry index in mapper.
     1358// @ return 0 if found / return 1 if not found / return -1 if mapper access error.
     1359//////////////////////////////////////////////////////////////////////////////////////////////
     1360error_t fatfs_scan_directory( mapper_t *  mapper,
     1361                              char     *  name,
     1362                              uint8_t  ** entry,
     1363                              uint32_t *  index )
    13651364{
    13661365    // Two embedded loops to scan the directory mapper:
     
    13681367    // - scan the directory entries in each 4 Kbytes page
    13691368
    1370 #if DEBUG_FATFS_GET_DENTRY
     1369// check parent_inode and child_inode
     1370assert( (mapper != NULL) , "mapper pointer is NULL\n" );
     1371assert( (name   != NULL ), "child name is undefined\n" );
     1372assert( (entry  != NULL ), "entry buffer undefined\n" );
     1373
     1374#if DEBUG_FATFS_SCAN_DIRECTORY
    13711375char       parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
    13721376uint32_t   cycle = (uint32_t)hal_get_cycles();
    13731377thread_t * this  = CURRENT_THREAD;
    1374 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
    1375 if( DEBUG_FATFS_GET_DENTRY < cycle )
    1376 printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
     1378vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , parent_name );
     1379if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1380printk("\n[%s]  thread[%x,%x] enter to search child <%s> in parent <%s> / cycle %d\n",
    13771381__FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
    13781382#endif
    13791383
    1380 // check parent_inode and child_inode
    1381 assert( (parent_inode != NULL) , "parent_inode is NULL\n" );
    1382 assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" );
    1383 
    1384     mapper_t * mapper    = parent_inode->mapper;
    1385     xptr_t     mapper_xp = XPTR( local_cxy , mapper );
    1386 
    1387 // check parent mapper
    1388 assert( (mapper != NULL) , "parent mapper is NULL\n");
    1389    
    1390     char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracter from each directory entry
     1384    char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracted from each directory entry
    13911385
    13921386    char       lfn1[16];         // buffer for one partial cname
    13931387    char       lfn2[16];         // buffer for one partial cname
    13941388    char       lfn3[16];         // buffer for one partial cname
     1389    xptr_t     mapper_xp;        // extended pointer on mapper descriptor
    13951390    xptr_t     page_xp;          // extended pointer on page descriptor
    13961391    xptr_t     base_xp;          // extended pointer on page base
     
    14001395    uint32_t   seq;              // sequence index
    14011396    uint32_t   lfn       = 0;    // LFN entries number
    1402     uint32_t   size      = 0;    // searched file/dir size (bytes)
    1403     uint32_t   cluster   = 0;    // searched file/dir cluster index
    1404     uint32_t   is_dir    = 0;    // searched file/dir type
    1405     int32_t    found     = 0;    // not found (0) / name found (1) / end of dir (-1)
     1397    int32_t    found     = 0;    // not yet = 0 / success = 1 / not found = 2 / error = -1
    14061398    uint32_t   page_id   = 0;    // page index in mapper
    1407     uint32_t   dentry_id = 0;    // directory entry index
    14081399    uint32_t   offset    = 0;    // byte offset in page
    14091400
    1410     // scan the parent directory mapper
     1401    mapper_xp = XPTR( local_cxy , mapper );
     1402
     1403    // scan the mapper pages
    14111404    while ( found == 0 )
    14121405    {
     
    14141407        page_xp = mapper_remote_get_page( mapper_xp , page_id );
    14151408
    1416         if( page_xp == XPTR_NULL) return EIO;
     1409        if( page_xp == XPTR_NULL)
     1410        {
     1411            found = -1;
     1412        }
    14171413
    14181414        // get page base
     
    14201416        base    = (uint8_t *)GET_PTR( base_xp );
    14211417
    1422 #if (DEBUG_FATFS_GET_DENTRY & 0x1)
    1423 if( DEBUG_FATFS_GET_DENTRY < cycle )
     1418#if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1)
     1419if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
    14241420mapper_display_page( mapper_xp , page_id , 256 );
    14251421#endif
     
    14321428            if (ord == NO_MORE_ENTRY)                 // no more entry => break
    14331429            {
    1434                 found = -1;
     1430                found = 2;
    14351431            }
    14361432            else if ( ord == FREE_ENTRY )             // free entry => skip
     
    14771473                if ( strcmp( name , cname ) == 0 )
    14781474                {
    1479                     cluster = (fatfs_get_record( DIR_FST_CLUS_HI , base + offset , 1 ) << 16) |
    1480                               (fatfs_get_record( DIR_FST_CLUS_LO , base + offset , 1 )      ) ;
    1481                     dentry_id = ((page_id<<12) + offset)>>5;
    1482                     is_dir    = ((attr & ATTR_DIRECTORY) == ATTR_DIRECTORY);
    1483                     size      = fatfs_get_record( DIR_FILE_SIZE , base + offset , 1 );
     1475                    *entry = base + offset;
     1476                    *index = ((page_id<<12) + offset)>>5;
    14841477                    found     = 1;
    14851478                }
     
    14941487    }  // end loop on pages
    14951488
    1496     // analyse the result of scan
    1497 
    1498     if ( found == -1 )  // found end of directory => failure
    1499     {
     1489    if( found == 1 )
     1490    {
     1491
     1492#if DEBUG_FATFS_SCAN_DIRECTORY
     1493cycle = (uint32_t)hal_get_cycles();
     1494if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1495printk("\n[%s]  thread[%x,%x] exit / found child <%s> in <%s>\n",
     1496__FUNCTION__, this->process->pid, this->trdid, name, parent_name );
     1497#endif
     1498        return 0;
     1499    }
     1500    else if( found == 2 )
     1501    {
     1502
     1503#if DEBUG_FATFS_SCAN_DIRECTORY
     1504cycle = (uint32_t)hal_get_cycles();
     1505if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1506printk("\n[%s]  thread[%x,%x] exit / child <%s> in <%s> not found\n",
     1507__FUNCTION__, this->process->pid, this->trdid, name, parent_name );
     1508#endif
     1509        return 1;
     1510    }
     1511    else
     1512    {
     1513        printk("\n[ERROR] in %s : cannot get page %d from mapper\n",
     1514        __FUNCTION__, page_id );
     1515
     1516        return -1;
     1517    }
     1518}  // end fatfs_scan_directory()
     1519
     1520
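
The new static function uses a three-valued return code (0 = entry found, 1 = entry not found, -1 = mapper access error); the two callers below currently collapse the last two cases into a single failure. A hypothetical caller sketch, assuming the ALMOS-MKH kernel environment ("init.elf" is only an example name):

    // Hypothetical caller, only to illustrate the return convention of
    // fatfs_scan_directory(); assumes the ALMOS-MKH kernel headers.
    static void fatfs_lookup_example( mapper_t * mapper )
    {
        uint8_t  * entry;    // pointer on the 32-byte FAT32 directory entry
        uint32_t   index;    // entry index in the directory mapper
        error_t    error;

        error = fatfs_scan_directory( mapper , "init.elf" , &entry , &index );

        if     ( error == 0 ) printk("found at index %d\n", index );    // success
        else if( error == 1 ) printk("name not found\n");               // no such entry
        else                  printk("mapper access error\n");          // I/O error
    }
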
     1521
     1522/////////////////////////////////////////////////////
     1523error_t fatfs_new_dentry( vfs_inode_t * parent_inode,
     1524                          char        * name,
     1525                          xptr_t        child_inode_xp )
     1526{
     1527    uint8_t  * entry;    // pointer on FAT32 directory entry (array of 32 bytes)
     1528    uint32_t   index;    // index of FAT32 directory entry in mapper
     1529    mapper_t * mapper;   // pointer on directory mapper
     1530    uint32_t   cluster;  // directory entry cluster
     1531    uint32_t   size;     // directory entry size
     1532    bool_t     is_dir;   // directory entry type (file/dir)
     1533    error_t    error;
     1534
     1535// check arguments
     1536assert( (parent_inode != NULL)         , "parent_inode is NULL\n" );
     1537assert( (name         != NULL)         , "name is NULL\n" );
     1538assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" );
     1539
     1540#if DEBUG_FATFS_GET_DENTRY
     1541char       parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1542uint32_t   cycle = (uint32_t)hal_get_cycles();
     1543thread_t * this  = CURRENT_THREAD;
     1544vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
     1545if( DEBUG_FATFS_GET_DENTRY < cycle )
     1546printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
     1547__FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
     1548#endif
     1549
     1550    // get pointer and index of searched directory entry in mapper
     1551    mapper = parent_inode->mapper;
     1552    error  = fatfs_scan_directory( mapper, name , &entry , &index );
     1553
      1554    // update child inode and dentry descriptors if success
     1555    if( error == 0 )
     1556    {
    15001557
    15011558#if DEBUG_FATFS_GET_DENTRY
    15021559cycle = (uint32_t)hal_get_cycles();
    15031560if( DEBUG_FATFS_GET_DENTRY < cycle )
    1504 printk("\n[%s]  thread[%x,%x] exit / child <%s> not found / cycle %d\n",
    1505 __FUNCTION__, this->process->pid, this->trdid, name, cycle );
    1506 #endif
    1507 
    1508         return -1;
    1509     }
    1510 
    1511     // get child inode cluster and local pointer
    1512     cxy_t          inode_cxy = GET_CXY( child_inode_xp );
    1513     vfs_inode_t  * inode_ptr = GET_PTR( child_inode_xp );
    1514 
    1515     // build extended pointer on parent dentried root
    1516     xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents );
      1561printk("\n[%s]  thread[%x,%x] exit / initialised child <%s> in <%s> / cycle %d\n",
     1562__FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle );
     1563#endif
     1564        // get relevant infos from FAT32 directory entry
     1565        cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) |
     1566                  (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 )      ) ;
     1567        is_dir  = (fatfs_get_record( DIR_ATTR        , entry , 1 ) & ATTR_DIRECTORY);
     1568        size    =  fatfs_get_record( DIR_FILE_SIZE   , entry , 1 );
     1569
     1570        // get child inode cluster and local pointer
     1571        cxy_t          inode_cxy = GET_CXY( child_inode_xp );
     1572        vfs_inode_t  * inode_ptr = GET_PTR( child_inode_xp );
     1573
      1574        // build extended pointer on root of the list of parent dentries
     1575        xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents );
    15171576
    15181577// check child inode has at least one parent
    15191578assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n");
    15201579
    1521     // get dentry pointers and cluster
    1522     xptr_t         dentry_xp  = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents );
    1523     vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
    1524     cxy_t          dentry_cxy = GET_CXY( dentry_xp );
     1580        // get dentry pointers and cluster
     1581        xptr_t         dentry_xp  = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents );
     1582        vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
     1583        cxy_t          dentry_cxy = GET_CXY( dentry_xp );
    15251584
    15261585// check dentry descriptor in same cluster as parent inode
    15271586assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" );
    15281587
    1529     // update the child inode "type", "size", and "extend" fields
    1530     vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
    1531 
    1532     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type   ) , type );
    1533     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size   ) , size );
    1534     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster );
    1535 
    1536     // update the dentry "extend" field
    1537     dentry_ptr->extend = (void *)(intptr_t)dentry_id;
    1538 
    1539 #if DEBUG_FATFS_GET_DENTRY
     1588        // update the child inode "type", "size", and "extend" fields
     1589        vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
     1590
     1591        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type   ) , type );
     1592        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size   ) , size );
     1593        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster );
     1594
     1595        // update the dentry "extend" field
     1596        dentry_ptr->extend = (void *)(intptr_t)index;
     1597
     1598        return 0;
     1599    }
     1600    else
     1601    {
     1602        return -1;
     1603    }
     1604
     1605}  // end fatfs_new_dentry()
     1606
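
fatfs_new_dentry() reads the first cluster, the size and the type from the 32-byte FAT32 directory entry through fatfs_get_record(). The standalone program below (not kernel code) performs the same extraction directly on a raw entry; the byte offsets are those of the standard FAT32 short-entry layout, which the changeset only designates through the symbolic names DIR_ATTR, DIR_FST_CLUS_HI, DIR_FST_CLUS_LO and DIR_FILE_SIZE.

    /*
     * Standalone illustration of the fields read by fatfs_new_dentry() :
     * attributes at byte 11, first cluster high word at bytes 20-21,
     * low word at bytes 26-27, file size at bytes 28-31 (little-endian).
     */
    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint8_t entry[32] = { 0 };              // fake directory entry
        entry[11] = 0x10;                       // ATTR_DIRECTORY
        entry[20] = 0x01;  entry[21] = 0x00;    // DIR_FST_CLUS_HI = 0x0001
        entry[26] = 0x34;  entry[27] = 0x12;    // DIR_FST_CLUS_LO = 0x1234
        entry[28] = 0x00;  entry[29] = 0x08;    // DIR_FILE_SIZE   = 0x00000800

        uint32_t clus_hi = entry[20] | (entry[21] << 8);
        uint32_t clus_lo = entry[26] | (entry[27] << 8);
        uint32_t cluster = (clus_hi << 16) | clus_lo;       // first cluster index
        uint32_t size    = entry[28] | (entry[29] << 8) |
                           (entry[30] << 16) | ((uint32_t)entry[31] << 24);
        int      is_dir  = (entry[11] & 0x10) != 0;         // directory flag

        printf("cluster %x / size %x / is_dir %d\n", cluster , size , is_dir );
        return 0;
    }
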
     1607//////////////////////////////////////////////////
     1608error_t fatfs_update_dentry( vfs_inode_t  * inode,
     1609                             vfs_dentry_t * dentry,
     1610                             uint32_t       size )
     1611{
     1612    uint8_t  * entry;    // pointer on FAT32 directory entry (array of 32 bytes)
     1613    uint32_t   index;    // index of FAT32 directory entry in mapper
     1614    mapper_t * mapper;   // pointer on directory mapper
     1615    error_t    error;
     1616
     1617// check arguments
     1618assert( (inode  != NULL) , "inode is NULL\n" );
     1619assert( (dentry != NULL) , "dentry is NULL\n" );
     1620assert( (size   != 0   ) , "size is 0\n" );
     1621
     1622#if DEBUG_FATFS_UPDATE_DENTRY
     1623char       dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1624uint32_t   cycle = (uint32_t)hal_get_cycles();
     1625thread_t * this  = CURRENT_THREAD;
     1626vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
     1627if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
     1628printk("\n[%s]  thread[%x,%x] enter for entry <%s> in dir <%s> / cycle %d\n",
     1629__FUNCTION__, this->process->pid, this->trdid, dentry->name , dir_name , cycle );
     1630#endif
     1631
     1632    // get pointer and index of searched directory entry in mapper
     1633    mapper = inode->mapper;
     1634    error  = fatfs_scan_directory( mapper, dentry->name , &entry , &index );
     1635
     1636    // update size in mapper if found
     1637    if( error == 0 )
     1638    {
     1639
     1640#if DEBUG_FATFS_UPDATE_DENTRY
    15401641cycle = (uint32_t)hal_get_cycles();
    1541 if( DEBUG_FATFS_GET_DENTRY < cycle )
    1542 printk("\n[%s]  thread[%x,%x] exit / child <%s> loaded in <%s> / cycle %d\n",
    1543 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle );
    1544 #endif
    1545 
    1546     return 0;
    1547 
    1548 }  // end fatfs_get_dentry()
     1642if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
     1643printk("\n[%s]  thread[%x,%x] exit / found entry <%s> in <%s> / cycle %d\n",
     1644__FUNCTION__, this->process->pid, this->trdid, dentry->name, dir_name, cycle );
     1645#endif
     1646        // set size in FAT32 directory entry
     1647        fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size );
     1648
     1649        // get local pointer on modified page base
     1650        void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
     1651
     1652        // get extended pointer on modified page descriptor
     1653        xptr_t    page_xp = ppm_base2page( XPTR( local_cxy , base ) );
     1654
     1655        // mark page as dirty
     1656        ppm_page_do_dirty( page_xp );
     1657
     1658        return 0;
     1659    }
     1660    else
     1661    {
     1662        return -1;
     1663    }
     1664
     1665}  // end fatfs_update_dentry()
    15491666
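
fatfs_update_dentry() marks the modified page as dirty after locating it from the entry pointer: the page base is obtained by masking the low-order bits with ~CONFIG_PPM_PAGE_MASK, then converted to a page descriptor by ppm_base2page(). The standalone snippet below (not kernel code) only illustrates the pointer rounding, assuming 4-Kbyte pages, i.e. CONFIG_PPM_PAGE_MASK == 0xFFF.

    /* Standalone illustration of the pointer rounding used above. */
    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uintptr_t mask  = 0xFFF;                   // stands for CONFIG_PPM_PAGE_MASK
        uintptr_t entry = 0x00A3F12C;              // fake pointer inside a page
        uintptr_t base  = entry & ~mask;           // page base : 0x00A3F000

        printf("entry %lx / page base %lx / offset in page %lx\n",
               (unsigned long)entry , (unsigned long)base ,
               (unsigned long)(entry & mask) );
        return 0;
    }
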
    15501667///////////////////////////////////////////////////////
     
    20562173assert( (inode_xp != XPTR_NULL) , "inode pointer is NULL\n" );
    20572174
    2058     // get first_cluster from inode extension
     2175    // get inode cluster and local pointer
    20592176    inode_ptr     = GET_PTR( inode_xp );
    20602177    inode_cxy     = GET_CXY( inode_xp );
     2178
     2179    // get first_cluster from inode extension
    20612180    first_xp      = XPTR( inode_cxy , &inode_ptr->extend );
    20622181    first_cluster = (uint32_t)(intptr_t)hal_remote_lpt( first_xp );
     
    20732192printk("\n[%s] thread[%x,%x] enter for <%s> / first_cluster %x / cycle %d\n",
    20742193__FUNCTION__ , this->process->pid, this->trdid, name, first_cluster, cycle );
     2194#endif
     2195
     2196#if (DEBUG_FATFS_RELEASE_INODE & 1)
     2197fatfs_display_fat( 0 , 512 );
    20752198#endif
    20762199
  • trunk/kernel/fs/fatfs.h

    r614 r623  
    309309
    310310/*****************************************************************************************
    311  * This function implements the generic vfs_fs_get_dentry() function for the FATFS.
    312  *****************************************************************************************
    313  * It initialises a new child (new inode/dentry couple in Inode Tree), identified
    314  * by the <child_inode_xp> argument, from the parent directory mapper, identified by the
    315  * <parent_inode> argument.
     311 * This function implements the generic vfs_fs_new_dentry() function for the FATFS.
     312 *****************************************************************************************
     313 * It initializes a new inode/dentry couple in Inode Tree, attached to the directory
     314 * identified by the <parent_inode> argument. The new directory entry is identified
      315 * by the <name> argument. The child inode descriptor, identified by the <child_inode_xp>
      316 * argument, and the dentry descriptor must have been previously allocated.
     316317 * It scans the parent mapper to find the <name> argument.
     317318 * It sets the "type", "size", and "extend" fields in the inode descriptor.
     
    324325 * @ return 0 if success / return ENOENT if child not found.
    325326 ****************************************************************************************/
    326 error_t fatfs_get_dentry( struct vfs_inode_s * parent_inode,
     327error_t fatfs_new_dentry( struct vfs_inode_s * parent_inode,
    327328                          char               * name,
    328329                          xptr_t               child_inode_xp );
    329330
    330331/*****************************************************************************************
     332 * This function implements the generic vfs_fs_update_dentry() function for the FATFS.
     333 *****************************************************************************************
      334 * It updates the size of a directory entry identified by the <dentry> argument in
     335 * the mapper of a directory identified by the <inode> argument, as defined by the <size>
     336 * argument.
      337 * It scans the mapper to find the entry identified by the dentry "name" field.
      338 * It sets the "size" field in the directory mapper AND marks the page as DIRTY.
     339 * It must be called by a thread running in the cluster containing the directory inode.
     340 *****************************************************************************************
     341 * @ inode        : local pointer on inode (directory).
     342 * @ dentry       : local pointer on dentry (for name).
     343 * @ size         : new size value.
     344 * @ return 0 if success / return ENOENT if child not found.
     345 ****************************************************************************************/
     346error_t fatfs_update_dentry( struct vfs_inode_s  * inode,
     347                             struct vfs_dentry_s * dentry,
     348                             uint32_t              size );
     349
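
Both functions must run in the cluster that holds the directory inode. The VFS layer honours that constraint by dispatching on the inode cluster, as the reworked vfs_close() does later in this changeset. A hypothetical helper showing that dispatch, assuming the ALMOS-MKH kernel environment and the signatures visible in the vfs.c hunks below:

    // Hypothetical helper, only to show the local-versus-remote dispatch
    // performed before calling the FS-specific update_dentry function.
    static error_t update_dentry_size( xptr_t         dir_inode_xp,   // directory inode
                                       vfs_dentry_t * dentry_ptr,     // dentry in that cluster
                                       uint32_t       size )          // new size value
    {
        error_t       error;
        cxy_t         dir_cxy   = GET_CXY( dir_inode_xp );
        vfs_inode_t * dir_inode = GET_PTR( dir_inode_xp );

        if( dir_cxy == local_cxy )     // directory inode is local : direct call
        {
            error = vfs_fs_update_dentry( dir_inode , dentry_ptr , size );
        }
        else                           // directory inode is remote : go through an RPC
        {
            rpc_vfs_fs_update_dentry_client( dir_cxy , dir_inode , dentry_ptr , size , &error );
        }
        return error;
    }
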
     350/*****************************************************************************************
    331351 * This function implements the generic vfs_fs_get_user_dir() function for the FATFS.
    332352 *****************************************************************************************
    333353 * It is called by the remote_dir_create() function to scan the mapper of a directory
    334  * identified by the <inode> argument and copy up to <max_dirent> valid dentries to a
     354 * identified by the <inode> argument, and copy up to <max_dirent> valid dentries to a
    335355 * local dirent array, defined by the <array> argument. The <min_dentry> argument defines
    336  * the index of the first dentry to copied to the target dirent array.
     356 * the index of the first dentry to be copied to the target dirent array.
    337357 * This function returns in the <entries> buffer the number of dentries actually written,
    338358 * and signals in the <done> buffer when the last valid entry has been found.
    339359 * If the <detailed> argument is true, a dentry/inode couple that does not exist in
    340  * the Inode Tree is dynamically created, and all dirent fiels are documented in the
     360 * the Inode Tree is dynamically created, and all dirent fields are documented in the
    341361 * dirent array. Otherwise, only the dentry name is documented.
    342362 * It must be called by a thread running in the cluster containing the directory inode.
     
    443463 * The page - and the mapper - can be located in another cluster than the calling thread.
    444464 * The pointer on the mapper and the page index in file are found in the page descriptor.
    445  * It is used for both for a regular file/directory mapper, and the FAT mapper.
     465 * It is used for both a regular file/directory mapper, and the FAT mapper.
     446466 * For the FAT mapper, it accesses the FATFS to get the location on IOC device.
     447467 * For a regular file, it accesses the FAT mapper to get the cluster index on IOC device.
  • trunk/kernel/fs/ramfs.c

    r602 r623  
    3535                     char   * ramfs_root_name )
    3636{
    37     xptr_t    unused_xp;   // required by vfs_add_child_in_parent()                     
     37    xptr_t        dentry_xp;     // unused but required by vfs_add_child_in_parent()
     38    xptr_t        inode_xp;
     39    vfs_inode_t * inode_ptr;
    3840 
    3941    cxy_t     cxy = cluster_random_select();
     
    4143    // create VFS dentry and VFS inode for RAMFS root directory
    4244    return  vfs_add_child_in_parent( cxy,
    43                                      INODE_TYPE_DIR,
    4445                                     FS_TYPE_RAMFS,
    4546                                     parent_inode_xp,
    4647                                     ramfs_root_name,
    47                                      &unused_xp,
    48                                      &unused_xp );
     48                                     &dentry_xp,
     49                                     &inode_xp );
     50    // update inode type field
     51    inode_ptr = GET_PTR( inode_xp );
     52    inode_ptr->type = INODE_TYPE_DIR;
    4953}
    5054
  • trunk/kernel/fs/vfs.c

    r614 r623  
    33 *
    44 * Author  Mohamed Lamine Karaoui (2015)
    5  *         Alain Greiner (2016,2017,2018)
     5 *         Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    142142////////////////////////////////////////////////////
    143143error_t vfs_inode_create( vfs_fs_type_t     fs_type,
    144                           vfs_inode_type_t  inode_type,
    145144                          uint32_t          attr,
    146145                          uint32_t          rights,
     
    214213
    215214    // initialize inode descriptor
    216     inode->type       = inode_type;
     215    inode->type       = INODE_TYPE_FILE;     // default value
    217216    inode->inum       = inum;
    218217    inode->attr       = attr;
     
    228227    mapper->inode     = inode;
    229228 
    230     // initialise threads waiting queue
    231     // xlist_root_init( XPTR( local_cxy , &inode->wait_root ) );
    232 
    233229    // initialize chidren dentries xhtab
     234230    // initialize children dentries xhtab
     
    278274    vfs_inode_t * ptr = GET_PTR( inode_xp );
    279275
     276    // build extended pointers on lock & size
     277    xptr_t   lock_xp = XPTR( cxy , &ptr->size_lock );
     278    xptr_t   size_xp = XPTR( cxy , &ptr->size );
     279
     280    // take lock in read mode
     281    remote_rwlock_rd_acquire( lock_xp );
     282
    280283    // get size
    281     remote_rwlock_rd_acquire( XPTR( cxy , &ptr->size_lock ) );
    282     uint32_t size = hal_remote_l32( XPTR( cxy , &ptr->size ) );
    283     remote_rwlock_rd_release( XPTR( cxy , &ptr->size_lock ) );
     284    uint32_t size = hal_remote_l32( size_xp );
     285
     286    // release lock from read mode
     287    remote_rwlock_rd_release( lock_xp );
     288
    284289    return size;
    285290}
    286291
    287 ////////////////////////////////////////////
    288 void vfs_inode_set_size( xptr_t    inode_xp,
    289                          uint32_t  size )
     292///////////////////////////////////////////////
     293void vfs_inode_update_size( xptr_t    inode_xp,
     294                            uint32_t  size )
    290295{
    291296    // get inode cluster and local pointer
     
    293298    vfs_inode_t * ptr = GET_PTR( inode_xp );
    294299
    295     // set size
    296     remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) );
    297     hal_remote_s32( XPTR( cxy , &ptr->size ) , size );
    298     remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) );
     300    // build extended pointers on lock & size
     301    xptr_t   lock_xp = XPTR( cxy , &ptr->size_lock );
     302    xptr_t   size_xp = XPTR( cxy , &ptr->size );
     303
     304    // take lock in write mode
     305    remote_rwlock_wr_acquire( lock_xp );
     306
     307    // get current size
     308    uint32_t current_size = hal_remote_l32( size_xp );
     309
     310    // set size if required
     311    if( current_size < size ) hal_remote_s32( size_xp , size );
     312
     313    // release lock from write mode
     314    remote_rwlock_wr_release( lock_xp );
    299315}
    300316
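
The renamed vfs_inode_update_size() no longer stores its argument unconditionally: under the write lock it keeps the maximum of the current and requested sizes. The standalone program below (not kernel code, lock handling omitted) only illustrates this grow-only behaviour.

    /* Standalone illustration of the grow-only size update. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t inode_size = 0;               // stands for inode->size

    static void update_size( uint32_t size )      // grow-only update
    {
        if( inode_size < size ) inode_size = size;
    }

    int main( void )
    {
        update_size( 4096 );    // grows : size becomes 4096
        update_size( 1024 );    // ignored : 1024 < 4096
        update_size( 8192 );    // grows : size becomes 8192

        printf("final size = %u\n", inode_size );   // prints 8192
        return 0;
    }
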
     
    546562
    547563// check refcount
    548 assert( (file->refcount == 0) , "refcount non zero\n" );
     564// assert( (file->refcount == 0) , "refcount non zero\n" );
    549565
    550566        kmem_req_t req;
     
    554570
    555571#if DEBUG_VFS_CLOSE
     572char name[CONFIG_VFS_MAX_NAME_LENGTH];
     573vfs_file_get_name( XPTR( local_cxy , file ) , name );
    556574thread_t * this = CURRENT_THREAD;
    557575uint32_t cycle = (uint32_t)hal_get_cycles();
    558576if( DEBUG_VFS_CLOSE < cycle )
    559 printk("\n[%s] thread[%x,%x] deleted file %x in cluster %x / cycle %d\n",
    560 __FUNCTION__, this->process->pid, this->trdid, file, local_cxy, cycle );
     577printk("\n[%s] thread[%x,%x] deleted file <%s> in cluster %x / cycle %d\n",
     578__FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle );
    561579#endif
    562580
     
    585603    hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->refcount ) , -1 );
    586604}
     605
     606///////////////////////////////////////
     607void vfs_file_get_name( xptr_t file_xp,
     608                        char * name )
     609{
     610    // get cluster and local pointer on remote file
     611    vfs_file_t * file_ptr = GET_PTR( file_xp );
     612    cxy_t        file_cxy = GET_CXY( file_xp );
     613
     614    // get pointers on remote inode
     615    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     616    xptr_t        inode_xp  = XPTR( file_cxy , inode_ptr );
     617
     618    // call the relevant function
     619    vfs_inode_get_name( inode_xp , name );
     620}
     621
    587622
    588623//////////////////////////////////////////////////////////////////////////////////////////
     
    889924}  // vfs_lseek()
    890925
    891 ///////////////////////////////////
     926////////////////////////////////////
    892927error_t vfs_close( xptr_t   file_xp,
    893928                   uint32_t file_id )
    894929{
    895     cluster_t  * cluster;          // local pointer on local cluster
    896     cxy_t        file_cxy;         // cluster containing the file descriptor.
    897     vfs_file_t * file_ptr;         // local ponter on file descriptor
    898     cxy_t        owner_cxy;        // process owner cluster
    899     lpid_t       lpid;             // process local index
    900     xptr_t       root_xp;          // root of list of process copies
    901     xptr_t       lock_xp;          // lock protecting the list of copies
    902     xptr_t       iter_xp;          // iterator on list of process copies
    903     xptr_t       process_xp;       // extended pointer on one process copy
    904     cxy_t        process_cxy;      // process copy cluster
    905     process_t  * process_ptr;      // process copy local pointer
    906 
    907 // check arguments
    908 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL\n" );
    909 assert( (file_id < CONFIG_PROCESS_FILE_MAX_NR) , "illegal file_id\n" );
     930    cxy_t         file_cxy;         // cluster containing the file descriptor.
      931    vfs_file_t  * file_ptr;         // local pointer on file descriptor
     932    cxy_t         owner_cxy;        // process owner cluster
     933    pid_t         pid;              // process identifier
     934    lpid_t        lpid;             // process local index
     935    xptr_t        root_xp;          // root of xlist (processes , or dentries)
     936    xptr_t        lock_xp;          // lock protecting the xlist
     937    xptr_t        iter_xp;          // iterator on xlist
     938    mapper_t    * mapper_ptr;       // local pointer on associated mapper
     939    xptr_t        mapper_xp;        // extended pointer on mapper
     940    vfs_inode_t * inode_ptr;        // local pointer on associated inode
     941    xptr_t        inode_xp;         // extended pointer on inode
     942    uint32_t      size;             // current file size (from inode descriptor)
     943    error_t       error;
     944
     945    char          name[CONFIG_VFS_MAX_NAME_LENGTH];  // file name
     946
     947// check argument
     948assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL\n" );
    910949
    911950    thread_t  * this    = CURRENT_THREAD;
    912951    process_t * process = this->process;
    913 
     952    cluster_t * cluster = LOCAL_CLUSTER;
     953
     954    // get file name
     955    vfs_file_get_name( file_xp , name );
     956   
    914957#if DEBUG_VFS_CLOSE
    915958uint32_t cycle = (uint32_t)hal_get_cycles();
    916959if( DEBUG_VFS_CLOSE < cycle )
    917 printk("\n[%s] thread[%x,%x] enter / fdid %d / cycle %d\n",
    918 __FUNCTION__, process->pid, this->trdid, file_id, cycle );
    919 #endif
    920 
    921     // get local pointer on local cluster manager
    922     cluster = LOCAL_CLUSTER;
     960printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
     961__FUNCTION__, process->pid, this->trdid, name, cycle );
     962#endif
     963
     964    // get cluster and local pointer on remote file descriptor
     965    file_cxy = GET_CXY( file_xp );
     966    file_ptr = GET_PTR( file_xp );
     967
     968    //////// 1) update all dirty pages from mapper to device
     969
     970    // get pointers on mapper associated to file
     971    mapper_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     972    mapper_xp  = XPTR( file_cxy , mapper_ptr );
     973
     974    // copy all dirty pages from mapper to device
     975    if( file_cxy == local_cxy )
     976    {
     977        error = mapper_sync( mapper_ptr );
     978    }
     979    else
     980    {
     981        rpc_mapper_sync_client( file_cxy,
     982                                mapper_ptr,
     983                                &error );
     984    }
     985
     986    if( error )
     987    {
     988        printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n",
     989        __FUNCTION__, name );
     990        return -1;
     991    }
     992
     993#if DEBUG_VFS_CLOSE
     994if( DEBUG_VFS_CLOSE < cycle )
     995printk("\n[%s] thread[%x,%x] synchronised mapper of <%s> to device\n",
     996__FUNCTION__, process->pid, this->trdid, name );
     997#endif
     998
     999    //////// 2) update file size in all parent directory mapper(s) and on device
     1000
     1001    // get pointers on remote inode
     1002    inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     1003    inode_xp  = XPTR( file_cxy , inode_ptr );
     1004
     1005    // get file size from remote inode
     1006    size = hal_remote_l32( XPTR( file_cxy , &inode_ptr->size ) );
     1007
     1008    // get root of list of parents dentry
     1009    root_xp = XPTR( file_cxy , &inode_ptr->parents );
     1010
     1011    // loop on all parents
     1012    XLIST_FOREACH( root_xp , iter_xp )
     1013    {
     1014        // get pointers on parent directory dentry
     1015        xptr_t         parent_dentry_xp  = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents );
     1016        cxy_t          parent_cxy        = GET_CXY( parent_dentry_xp );
     1017        vfs_dentry_t * parent_dentry_ptr = GET_PTR( parent_dentry_xp );
     1018
     1019        // get local pointer on parent directory inode
     1020        vfs_inode_t * parent_inode_ptr = hal_remote_lpt( XPTR( parent_cxy,
     1021                                                         &parent_dentry_ptr->parent ) );
     1022
     1023        // get local pointer on parent directory mapper
     1024        mapper_t * parent_mapper_ptr = hal_remote_lpt( XPTR( parent_cxy,
     1025                                                       &parent_inode_ptr->mapper ) );
     1026 
     1027        // update dentry size in parent directory mapper
     1028        if( parent_cxy == local_cxy )
     1029        {
     1030            error = vfs_fs_update_dentry( parent_inode_ptr,
     1031                                          parent_dentry_ptr,
     1032                                          size );
     1033        }
     1034        else
     1035        {
     1036            rpc_vfs_fs_update_dentry_client( parent_cxy,
     1037                                             parent_inode_ptr,
     1038                                             parent_dentry_ptr,
     1039                                             size,
     1040                                             &error );
     1041        }
     1042
     1043        if( error )
     1044        {
     1045            printk("\n[ERROR] in %s : cannot update size in parent\n",
     1046            __FUNCTION__ );
     1047            return -1;
     1048        }
     1049
     1050#if DEBUG_VFS_CLOSE
     1051char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1052vfs_inode_get_name( XPTR( parent_cxy , parent_inode_ptr ) , parent_name );
     1053if( DEBUG_VFS_CLOSE < cycle )
     1054printk("\n[%s] thread[%x,%x] updated size of <%s> in parent <%s>\n",
     1055__FUNCTION__, process->pid, this->trdid, name, parent_name );
     1056#endif
     1057
     1058        // copy all dirty pages from parent mapper to device
     1059        if( parent_cxy == local_cxy )
     1060        {
     1061            error = mapper_sync( parent_mapper_ptr );
     1062        }
     1063        else
     1064        {
     1065            rpc_mapper_sync_client( parent_cxy,
     1066                                    parent_mapper_ptr,
     1067                                    &error );
     1068        }
     1069
     1070        if( error )
     1071        {
     1072            printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n",
     1073            __FUNCTION__ );
     1074            return -1;
     1075        }
     1076
     1077#if DEBUG_VFS_CLOSE
     1078if( DEBUG_VFS_CLOSE < cycle )
      1079printk("\n[%s] thread[%x,%x] synchronised mapper of parent <%s> to device\n",
     1080__FUNCTION__, process->pid, this->trdid, parent_name );
     1081#endif
     1082
     1083    }
     1084
     1085    //////// 3) loop on the process copies to reset all fd_array[file_id] entries
    9231086
    9241087    // get owner process cluster and lpid
    925     owner_cxy  = CXY_FROM_PID( process->pid );
    926     lpid       = LPID_FROM_PID( process->pid );
     1088    pid        = process->pid;
     1089    owner_cxy  = CXY_FROM_PID( pid );
     1090    lpid       = LPID_FROM_PID( pid );
    9271091
    9281092    // get extended pointers on copies root and lock
     
    9301094    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
    9311095
    932     // 1) loop on the process descriptor copies to reset all fd_array[file_id] entries
    933 
    9341096    // take the lock protecting the list of copies
    9351097    remote_queuelock_acquire( lock_xp );
     
    9371099    XLIST_FOREACH( root_xp , iter_xp )
    9381100    {
    939         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    940         process_cxy = GET_CXY( process_xp );
    941         process_ptr = GET_PTR( process_xp );
    942 
    943 #if (DEBUG_VFS_CLOSE & 1 )
    944 if( DEBUG_VFS_CLOSE < cycle )
    945 printk("\n[%s]  reset fd_array[%d] for process %x in cluster %x\n",
    946 __FUNCTION__, file_id, process_ptr, process_cxy );
    947 #endif
    948 
    949 // fd_array lock is required for atomic write of a 64 bits word
    950 // xptr_t fd_array_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
    951 
    952         xptr_t entry_xp         = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] );
    953 
    954 // remote_rwlock_wr_acquire( fd_array_lock_xp );
    955 
     1101        xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     1102        cxy_t       process_cxy = GET_CXY( process_xp );
     1103        process_t * process_ptr = GET_PTR( process_xp );
     1104
     1105        xptr_t entry_xp = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] );
    9561106        hal_remote_s64( entry_xp , XPTR_NULL );
    957        
    958 // remote_rwlock_wr_release( fd_array_lock_xp );
    959 
    9601107        vfs_file_count_down( file_xp );
    961 
    9621108        hal_fence();
    9631109    }   
     
    9661112    remote_queuelock_release( lock_xp );
    9671113
    968 #if (DEBUG_VFS_CLOSE & 1)
     1114#if DEBUG_VFS_CLOSE
    9691115if( DEBUG_VFS_CLOSE < cycle )
    970 printk("\n[%s] thread[%x,%x] reset all fd-array copies\n",
    971 __FUNCTION__, process->pid, this->trdid );
    972 #endif
    973 
    974     // 2) release memory allocated to file descriptor in remote cluster
    975 
    976     // get cluster and local pointer on remote file descriptor
    977     file_cxy = GET_CXY( file_xp );
    978     file_ptr = GET_PTR( file_xp );
      1116printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%s>\n",
     1117__FUNCTION__, process->pid, this->trdid, name );
     1118#endif
     1119
     1120    //////// 4) release memory allocated to file descriptor in remote cluster
    9791121
    9801122    if( file_cxy == local_cxy )             // file cluster is local
     
    9901132cycle = (uint32_t)hal_get_cycles();
    9911133if( DEBUG_VFS_CLOSE < cycle )
    992 printk("\n[%s] thread[%x,%x] exit / fdid %d closed / cycle %d\n",
    993 __FUNCTION__, process->pid, this->trdid, file_id, cycle );
     1134printk("\n[%s] thread[%x,%x] exit / <%s> closed / cycle %d\n",
     1135__FUNCTION__, process->pid, this->trdid, name, cycle );
    9941136#endif
    9951137
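
The loop above relies on the generic remote-access idiom used throughout ALMOS-MKH: an extended pointer is built from a cluster identifier and a local pointer, then read or written with the hal_remote_* primitives. The lines below only restate this idiom with the identifiers visible in the hunk above; they are not additional code from this changeset.

    // build an extended pointer on a fd_array[] slot in a (possibly remote) process copy
    xptr_t entry_xp = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] );

    // read then reset the 64-bit slot through the hardware abstraction layer
    if( hal_remote_l64( entry_xp ) != XPTR_NULL )  hal_remote_s64( entry_xp , XPTR_NULL );
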
     
    11201262    {
    11211263        error = vfs_inode_create( parent_fs_type,
    1122                                   INODE_TYPE_DIR,
    11231264                                  attr,
    11241265                                  rights,
     
    11311272        rpc_vfs_inode_create_client( inode_cxy,
    11321273                                     parent_fs_type,
    1133                                      INODE_TYPE_DIR,
    11341274                                     attr,
    11351275                                     rights,
     
    11521292    // get new inode local pointer
    11531293    inode_ptr = GET_PTR( inode_xp );
     1294
     1295    // update inode "type" field
     1296    hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR );
    11541297   
    11551298#if(DEBUG_VFS_MKDIR & 1)
     
    14551598    xptr_t            dentry_xp;          // extended pointer on dentry to unlink
    14561599    vfs_dentry_t    * dentry_ptr;         // local pointer on dentry to unlink
     1600    vfs_ctx_t       * ctx_ptr;            // local pointer on FS context
     1601    vfs_fs_type_t     fs_type;            // File system type
    14571602
    14581603    char              name[CONFIG_VFS_MAX_NAME_LENGTH];  // name of link to remove
     
    14661611vfs_inode_get_name( root_xp , root_name );
    14671612if( DEBUG_VFS_UNLINK < cycle )
    1468 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n",
     1613printk("\n[%s] thread[%x,%x] : enter for root <%s> / path <%s> / cycle %d\n",
    14691614__FUNCTION__, process->pid, this->trdid, root_name, path, cycle );
    14701615#endif
     
    15011646vfs_inode_get_name( parent_xp , parent_name );
    15021647if( DEBUG_VFS_UNLINK < cycle )
    1503 printk("\n[%s] thread[%x,%x] parent inode <%s> is (%x,%x)\n",
     1648printk("\n[%s] thread[%x,%x] : parent inode <%s> is (%x,%x)\n",
    15041649__FUNCTION__, process->pid, this->trdid, parent_name, parent_cxy, parent_ptr );
    15051650#endif
     
    15081653    xptr_t children_xp = XPTR( parent_cxy , &parent_ptr->children );
    15091654
    1510     // get extended pointer on dentry to unlink
     1655    // try to get extended pointer on dentry from Inode Tree
    15111656    dentry_xp = xhtab_lookup( children_xp , name );
    15121657   
    1513     if( dentry_xp == XPTR_NULL )
    1514     {
    1515         remote_rwlock_wr_release( lock_xp );
    1516         printk("\n[ERROR] in %s : cannot get target dentry <%s> in <%s>\n",
    1517         __FUNCTION__, name, path );
    1518         return -1;
    1519     }
    1520    
    1521     // get local pointer on dentry to unlink
    1522     dentry_ptr = GET_PTR( dentry_xp );
      1658    // when dentry not found in Inode Tree, try to get it from the parent mapper
     1659
     1660    if( dentry_xp == XPTR_NULL )           // miss target dentry in Inode Tree
     1661    {
    15231662
    15241663#if( DEBUG_VFS_UNLINK & 1 )
    15251664if( DEBUG_VFS_UNLINK < cycle )
    1526 printk("\n[%s] thread[%x,%x] dentry <%s> to unlink is (%x,%x)\n",
    1527 __FUNCTION__, process->pid, this->trdid, name, parent_cxy, dentry_ptr );
    1528 #endif
    1529 
    1530     // get pointer on target inode
    1531     inode_xp  = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) );
    1532     inode_cxy = GET_CXY( inode_xp );
    1533     inode_ptr = GET_PTR( inode_xp );
    1534  
     1665printk("\n[%s] thread[%x,%x] : inode <%s> not found => scan parent mapper\n",
     1666__FUNCTION__, process->pid, this->trdid, name );
     1667#endif
     1668        // get parent inode FS type
     1669        ctx_ptr    = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->ctx ) );
     1670        fs_type    = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) );
     1671
     1672        // select a cluster for new inode
     1673        inode_cxy = cluster_random_select();
     1674
     1675        // speculatively insert a new child dentry/inode couple in inode tree
     1676        error = vfs_add_child_in_parent( inode_cxy,
     1677                                         fs_type,
     1678                                         parent_xp,
     1679                                         name,
     1680                                         &dentry_xp,
     1681                                         &inode_xp );
     1682        if( error )
     1683        {
     1684            printk("\n[ERROR] in %s : cannot create inode <%s> in path <%s>\n",
     1685            __FUNCTION__ , name, path );
     1686
     1687            vfs_remove_child_from_parent( dentry_xp );
     1688            return -1;
     1689        }
     1690
     1691        // get local pointers on new dentry and new inode descriptors
     1692        inode_ptr  = GET_PTR( inode_xp );
     1693        dentry_ptr = GET_PTR( dentry_xp );
     1694
     1695        // scan parent mapper to find the missing dentry, and complete
      1696        // initialisation of new dentry and new inode descriptors in the Inode Tree
     1697        if( parent_cxy == local_cxy )
     1698        {
     1699            error = vfs_fs_new_dentry( parent_ptr,
     1700                                       name,
     1701                                       inode_xp );
     1702        }
     1703        else
     1704        {
     1705            rpc_vfs_fs_new_dentry_client( parent_cxy,
     1706                                          parent_ptr,
     1707                                          name,
     1708                                          inode_xp,
     1709                                          &error );
     1710        }
     1711
     1712        if ( error )   // dentry not found in parent mapper
     1713        {
     1714            printk("\n[ERROR] in %s : cannot get dentry <%s> in path <%s>\n",
     1715            __FUNCTION__ , name, path );
     1716            return -1;
     1717        }
     1718
     1719#if (DEBUG_VFS_UNLINK & 1)
     1720if( DEBUG_VFS_UNLINK < cycle )
     1721printk("\n[%s] thread[%x,%x] : created missing inode & dentry <%s> in cluster %x\n",
     1722__FUNCTION__, process->pid, this->trdid, name, inode_cxy );
     1723#endif
     1724
     1725    }
     1726    else                                  // found target dentry in Inode Tree
     1727    {
     1728        dentry_ptr = GET_PTR( dentry_xp );
     1729       
     1730        // get pointer on target inode from dentry
     1731        inode_xp  = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) );
     1732        inode_cxy = GET_CXY( inode_xp );
     1733        inode_ptr = GET_PTR( inode_xp );
     1734    }
     1735
      1736    // At this point the Inode Tree contains the target dentry and child inode,
      1737    // so we can safely remove this dentry from both the parent mapper and the Inode Tree.
     1738
    15351739#if( DEBUG_VFS_UNLINK & 1 )
    1536 char inode_name[CONFIG_VFS_MAX_NAME_LENGTH];
    1537 vfs_inode_get_name( inode_xp , inode_name );
    15381740if( DEBUG_VFS_UNLINK < cycle )
    1539 printk("\n[%s] thread[%x,%x] target inode <%s> is (%x,%x) / cycle %d\n",
    1540 __FUNCTION__, process->pid, this->trdid, inode_name, inode_cxy, inode_ptr, cycle );
     1741printk("\n[%s] thread[%x,%x] : dentry (%x,%x) / inode (%x,%x)\n",
     1742__FUNCTION__, process->pid, this->trdid, parent_cxy, dentry_ptr, inode_cxy, inode_ptr );
    15411743#endif
    15421744
     
    15451747    inode_links  = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->links ) );
    15461748
    1547 // check target inode links counter
    1548 assert( (inode_links >= 1), "illegal inode links count %d for <%s>\n", inode_links, path );
    1549 
    15501749    ///////////////////////////////////////////////////////////////////////
    15511750    if( (inode_type == INODE_TYPE_FILE) || (inode_type == INODE_TYPE_DIR) )
    15521751    {
     1752
     1753#if( DEBUG_VFS_UNLINK & 1 )
     1754if( DEBUG_VFS_UNLINK < cycle )
     1755printk("\n[%s] thread[%x,%x] : unlink inode <%s> / type %s / %d links\n",
     1756__FUNCTION__, process->pid, this->trdid, name, vfs_inode_type_str(inode_type), inode_links );
     1757#endif
     1758
    15531759        // 1. Release clusters allocated to target inode
    15541760        //    and synchronize the FAT on IOC device if last link.
     
    15571763            // build extended pointer on target inode "children" number
    15581764            xptr_t inode_children_xp = XPTR( inode_cxy , &inode_ptr->children.items );
     1765
     1766printk("\n@@@ in %s : children_xp = (%x,%x)\n",
     1767__FUNCTION__, inode_cxy, &inode_ptr->children.items );
    15591768
    15601769            // get target inode number of children
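
The hunk is cut here, but the surrounding comments indicate that the removal decision depends on the "links" and "children" counters read just above. The lines below are only a simplified, hypothetical sketch of that decision, not the actual code of this changeset:

    // hypothetical sketch : a directory must be empty to be unlinked, and the
    // FS clusters / FAT are only released and synchronized for the last link
    uint32_t children = hal_remote_l32( inode_children_xp );

    if( (inode_type == INODE_TYPE_DIR) && (children > 0) )  return -1;   // not empty

    if( inode_links == 1 )
    {
        // release the clusters allocated to the inode and synchronize
        // the FAT on the IOC device (FS specific : local call or RPC)
    }
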
     
    17131922
    17141923}  // end vfs_stat()
    1715 
    1716 /////////////////////////////////////////////
    1717 error_t vfs_readdir( xptr_t          file_xp,
    1718                      struct dirent * k_dirent )
    1719 {
    1720     assert( false , "not implemented file_xp: %x, k_dirent ptr %x\n",
    1721       file_xp, k_dirent );
    1722     return 0;
    1723 }
    1724 
    1725 ////////////////////////////////////
    1726 error_t vfs_rmdir( xptr_t   file_xp,
    1727                    char   * path )
    1728 {
    1729     assert( false , "not implemented file_xp: %x, path <%s>\n",
    1730       file_xp, path );
    1731     return 0;
    1732 }
    17331924
    17341925////////////////////////////////////
     
    21952386    cxy_t              child_cxy;    // cluster for child inode
    21962387    vfs_inode_t      * child_ptr;    // local pointer on child inode
    2197     vfs_inode_type_t   child_type;   // child inode type
    21982388    vfs_fs_type_t      fs_type;      // File system type
    21992389    vfs_ctx_t        * ctx_ptr;      // local pointer on FS context
     
    23192509                child_cxy = cluster_random_select();
    23202510
    2321                 // define child inode type
    2322                 if( dir ) child_type = INODE_TYPE_DIR;
    2323                 else      child_type = INODE_TYPE_FILE;
    2324  
    23252511                // insert a new child dentry/inode couple in inode tree
    23262512                error = vfs_add_child_in_parent( child_cxy,
    2327                                                  child_type,
    23282513                                                 fs_type,
    23292514                                                 parent_xp,
     
    23502535                if( parent_cxy == local_cxy )
    23512536                {
    2352                     error = vfs_fs_get_dentry( parent_ptr,
     2537                    error = vfs_fs_new_dentry( parent_ptr,
    23532538                                               name,
    23542539                                               child_xp );
     
    23562541                else
    23572542                {
    2358                     rpc_vfs_fs_get_dentry_client( parent_cxy,
     2543                    rpc_vfs_fs_new_dentry_client( parent_cxy,
    23592544                                                  parent_ptr,
    23602545                                                  name,
     
    29613146////////////////////////////////////////////////////////////////////
    29623147error_t vfs_add_child_in_parent( cxy_t              child_cxy,
    2963                                  vfs_inode_type_t   child_type,
    29643148                                 vfs_fs_type_t      fs_type,
    29653149                                 xptr_t             parent_inode_xp,
     
    30383222    {
    30393223        error = vfs_inode_create( fs_type,
    3040                                   child_type,
    30413224                                  attr,
    30423225                                  mode,
     
    30493232        rpc_vfs_inode_create_client( child_cxy,
    30503233                                     fs_type,
    3051                                      child_type,
    30523234                                     attr,
    30533235                                     mode,
     
    33093491
    33103492////////////////////////////////////////////////
    3311 error_t vfs_fs_get_dentry( vfs_inode_t * parent,
     3493error_t vfs_fs_new_dentry( vfs_inode_t * parent,
    33123494                           char        * name,
    33133495                           xptr_t        child_xp )
     
    33253507    if( fs_type == FS_TYPE_FATFS )
    33263508    {
    3327         error = fatfs_get_dentry( parent , name , child_xp );
     3509        error = fatfs_new_dentry( parent , name , child_xp );
    33283510    }
    33293511    else if( fs_type == FS_TYPE_RAMFS )
     
    33423524    return error;
    33433525
    3344 } // end vfs_fs_get_dentry()
     3526} // end vfs_fs_new_dentry()
     3527
     3528///////////////////////////////////////////////////
     3529error_t vfs_fs_update_dentry( vfs_inode_t  * inode,
     3530                              vfs_dentry_t * dentry,
     3531                              uint32_t       size )
     3532{
     3533    error_t error = 0;
     3534
     3535// check arguments
     3536assert( (inode  != NULL) , "inode  pointer is NULL\n");
     3537assert( (dentry != NULL) , "dentry pointer is NULL\n");
     3538
     3539    // get parent inode FS type
     3540    vfs_fs_type_t fs_type = inode->ctx->type;
     3541
     3542    // call relevant FS function
     3543    if( fs_type == FS_TYPE_FATFS )
     3544    {
     3545        error = fatfs_update_dentry( inode , dentry , size );
     3546    }
     3547    else if( fs_type == FS_TYPE_RAMFS )
     3548    {
     3549        assert( false , "should not be called for RAMFS\n" );
     3550    }
     3551    else if( fs_type == FS_TYPE_DEVFS )
     3552    {
     3553        assert( false , "should not be called for DEVFS\n" );
     3554    }
     3555    else
     3556    {
     3557        assert( false , "undefined file system type\n" );
     3558    }
     3559
     3560    return error;
     3561
     3562} // end vfs_fs_update_dentry()
    33453563
    33463564///////////////////////////////////////////////////
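
Taken together, the vfs.c hunks above change the inode creation idiom: vfs_inode_create() no longer receives an inode type, and each caller now sets the "type" field explicitly after creation, possibly through a remote access. A condensed, hedged sketch of the new idiom (the arguments elided in the hunks above remain elided here):

    // 1. create the inode without specifying its type (local call or RPC)
    error = vfs_inode_create( fs_type , attr , rights , /* other arguments unchanged */ );

    // 2. set the "type" field explicitly, with a remote access if the inode
    //    has been placed in another cluster
    inode_cxy = GET_CXY( inode_xp );
    inode_ptr = GET_PTR( inode_xp );
    hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR );
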
  • trunk/kernel/fs/vfs.h

    r614 r623  
    108108/******************************************************************************************
    109109 * This structure define a VFS inode.
    110  * An inode has several children dentries (if it is a directory), an can have several
      110 * An inode can have several children dentries (if it is a directory), and can have several
     111111 * parents dentries (if it has several alias links):
    112112 * - The "parents" field is the root of the xlist of parents dentries, and the "links"
     
    166166        remote_rwlock_t    size_lock;        /*! protect read/write to size                  */
    167167        remote_rwlock_t    main_lock;        /*! protect inode tree traversal and modifs     */
    168 //  list_entry_t       list;             /*! member of set of inodes in same cluster     */
    169 //  list_entry_t       wait_root;        /*! root of threads waiting on this inode       */
    170168        struct mapper_s  * mapper;           /*! associated file cache                       */
    171169        void             * extend;           /*! fs_type_specific inode extension            */
     
    195193
    196194/******************************************************************************************
    197  * This structure defines a directory entry.
      195 * This structure defines a directory entry.
    198196 * A dentry contains the name of a remote file/dir, an extended pointer on the
    199197 * inode representing this file/dir, a local pointer on the inode representing
     
    321319 *****************************************************************************************/
    322320error_t vfs_inode_create( vfs_fs_type_t     fs_type,
    323                           vfs_inode_type_t  inode_type,
    324321                          uint32_t          attr,
    325322                          uint32_t          rights,
     
    349346
    350347/******************************************************************************************
    351  * This function set the <size> of a file/dir to a remote inode,
    352  * taking the remote_rwlock protecting <size> in WRITE_MODE.
     348 * This function updates the "size" field of a remote inode identified by <inode_xp>.
      349 * It takes the rwlock protecting the file size in WRITE_MODE, and sets the "size" field
     350 * when the current size is smaller than the requested <size> argument.
    353351 *****************************************************************************************
    354352 * @ inode_xp  : extended pointer on the remote inode.
    355  * @ size      : value to be written.
    356  *****************************************************************************************/
    357 void vfs_inode_set_size( xptr_t   inode_xp,
    358                          uint32_t size );
     353 * @ size      : requested size value.
     354 *****************************************************************************************/
     355void vfs_inode_update_size( xptr_t   inode_xp,
     356                            uint32_t size );
    359357
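
A minimal sketch of the grow-only semantics described above, assuming the inode "size" field and its "size_lock" are accessed with the remote primitives used elsewhere in this changeset (this is an illustration, not the actual implementation):

    void vfs_inode_update_size_sketch( xptr_t inode_xp , uint32_t size )
    {
        cxy_t         cxy = GET_CXY( inode_xp );
        vfs_inode_t * ptr = GET_PTR( inode_xp );

        remote_rwlock_wr_acquire( XPTR( cxy , &ptr->size_lock ) );

        // the size can only grow, it is never shrunk by this function
        if( hal_remote_l32( XPTR( cxy , &ptr->size ) ) < size )
            hal_remote_s32( XPTR( cxy , &ptr->size ) , size );

        remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) );
    }
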
    360358/******************************************************************************************
     
    451449 * This function releases memory allocated to a local file descriptor.
    452450 * It must be executed by a thread running in the cluster containing the inode,
    453  * and the file refcount must be zero.
    454  * If the client thread is not running in the owner cluster, it must use the
    455  * rpc_vfs_file_destroy_client() function.
     451 * and the file refcount must be zero. Use the RPC_VFS_FILE_DESTROY if required.
    456452 ******************************************************************************************
    457453 * @ file  : local pointer on file descriptor.
     
    465461void vfs_file_count_up  ( xptr_t   file_xp );
    466462void vfs_file_count_down( xptr_t   file_xp );
     463
     464/******************************************************************************************
      465 * This debug function copies the name of the file identified by the <file_xp>
     466 * argument to a local buffer identified by the <name> argument.
     467 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH.
     468 *****************************************************************************************
      469 * @ file_xp  : extended pointer on the remote file descriptor.
     470 * @ name     : local buffer pointer.
     471 *****************************************************************************************/
      472void vfs_file_get_name( xptr_t file_xp,
     473                        char * name );
    467474
    468475
     
    537544 * Only the distributed Inode Tree is modified: it does NOT modify the parent mapper,
    538545 * and does NOT update the FS on IOC device.
      546 * It sets the inode type to the default INODE_TYPE_FILE value.
    539547 * It can be executed by any thread running in any cluster (can be different from both
    540548 * the child cluster and the parent cluster).
     
    552560 ******************************************************************************************
    553561 * @ child_inode_cxy  : [in]  target cluster for child inode.
    554  * @ child_inode_type : [in]  child inode type
    555562 * @ fs_type          : [in]  child inode FS type.
    556563 * @ parent_inode_xp  : [in]  extended pointer on parent inode.
     
    561568 *****************************************************************************************/
    562569error_t vfs_add_child_in_parent( cxy_t              child_inode_cxy,
    563                                  vfs_inode_type_t   child_inode_type,
    564570                                 vfs_fs_type_t      fs_type,
    565571                                 xptr_t             parent_inode_xp,
     
    729735/******************************************************************************************
     730736 * This function closes the - non-replicated - file descriptor identified by the <file_xp>
    731  * and <file_id> arguments.
    732  * 1) All entries in the fd_array copies are directly reset by the calling thread,
     737 * and <file_id> arguments. The <file_id> is required to reset the fd_array[] slot.
     738 * It can be called by a thread running in any cluster, and executes the following actions:
      739 * 1) It accesses the block device to update all dirty pages of the mapper associated
      740 *    with the file, and removes these pages from the dirty list, using an RPC if required.
      741 * 2) It updates the file size in all parent directory mapper(s), and updates the modified
     742 *    pages on the block device, using RPCs if required.
     743 * 3) All entries in the fd_array copies are directly reset by the calling thread,
    733744 *    using remote accesses.
    734  * 2) The memory allocated to file descriptor in cluster containing the inode is released.
    735  *    It requires a RPC if cluster containing the file descriptor is remote.
    736  ******************************************************************************************
    737  * @ file_xp     : extended pointer on the file descriptor in owner cluster.
    738  * @ file_id     : file descriptor index in fd_array.
     745 * 4) The memory allocated to file descriptor in cluster containing the inode is released,
     746 *    using an RPC if cluster containing the file descriptor is remote.
     747 ******************************************************************************************
     748 * @ file_xp     : extended pointer on the file descriptor.
     749 * @ file_id     : file descriptor index in fd_array[].
    739750 * @ returns 0 if success / -1 if error.
    740751 *****************************************************************************************/
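
For illustration only, a hypothetical caller of vfs_close(): a close() syscall handler that has already located the fd_array[] slot would pass both the extended pointer and the slot index. The helper name and its body are assumptions, not part of this changeset:

    error_t sys_close_sketch( process_t * process ,
                              uint32_t    file_id )
    {
        // read the fd_array[] slot of the local process copy
        xptr_t file_xp = hal_remote_l64( XPTR( local_cxy ,
                                         &process->fd_array.array[file_id] ) );

        if( file_xp == XPTR_NULL )  return -1;      // empty slot

        // vfs_close() resets all fd_array copies and releases the descriptor
        return vfs_close( file_xp , file_id );
    }
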
     
    877888/******************************************************************************************
    878889 * This function makes the I/O operation to move one page identified by the <page_xp>
    879  * argument to/from the IOC device from/to the mapper, as defined by <cmd_type>.
     890 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>.
    880891 * Depending on the file system type, it calls the proper, FS specific function.
    881892 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must
     
    918929 * Finally, it synchronously updates the parent directory on IOC device.
    919930 *
     931 * Depending on the file system type, it calls the relevant, FS specific function.
    920932 * It must be executed by a thread running in the cluster containing the parent directory.
    921  * It can be the RPC_VFS_VS_REMOVE_DENTRY. This function does NOT take any lock.
      933 * It can be called through the RPC_VFS_FS_REMOVE_DENTRY. This function does NOT take any lock.
    922934 ******************************************************************************************
    923935 * @ parent  : local pointer on parent (directory) inode.
     
    933945 * and updates both the child inode descriptor, identified by the <child_xp> argument,
    934946 * and the associated dentry descriptor :
    935  * - It set the "size", and "extend" fields in inode descriptor.
      947 * - It sets the "size", "type", and "extend" fields in the inode descriptor.
     936948 * - It sets the "extend" field in the dentry descriptor.
    937949 * It is called by the vfs_lookup() function in case of miss.
     
    939951 * Depending on the file system type, it calls the relevant, FS specific function.
    940952 * It must be called by a thread running in the cluster containing the parent inode.
    941  * This function does NOT take any lock.
      953 * It can be called through the RPC_VFS_FS_NEW_DENTRY. This function does NOT take any lock.
    942954 ******************************************************************************************
    943955 * @ parent    : local pointer on parent inode (directory).
    944956 * @ name      : child name.
    945957 * @ child_xp  : extended pointer on remote child inode (file or directory)
    946  * @ return 0 if success / return ENOENT if not found.
    947  *****************************************************************************************/
    948 error_t vfs_fs_get_dentry( vfs_inode_t * parent,
     958 * @ return 0 if success / return -1 if dentry not found.
     959 *****************************************************************************************/
     960error_t vfs_fs_new_dentry( vfs_inode_t * parent,
    949961                           char        * name,
    950962                           xptr_t        child_xp );
     963
     964/******************************************************************************************
      965 * This function scans the mapper of an existing directory inode, identified by
      966 * the <inode> argument, to find a directory entry identified by the <dentry> argument,
      967 * and updates the size of this directory entry in the mapper, as defined by <size>.
     968 * The searched "name" is defined in the <dentry> argument, that must be in the same
     969 * cluster as the parent inode. It is called by the vfs_close() function.
     970 *
     971 * Depending on the file system type, it calls the relevant, FS specific function.
     972 * It must be called by a thread running in the cluster containing the parent inode.
      973 * It can be called through the RPC_VFS_FS_UPDATE_DENTRY. This function does NOT take any lock.
     974 ******************************************************************************************
      975 * @ inode     : local pointer on the parent inode (directory).
     976 * @ dentry    : local pointer on dentry.
     977 * @ size      : new size value (bytes).
     978 * @ return 0 if success / return ENOENT if not found.
     979 *****************************************************************************************/
     980error_t vfs_fs_update_dentry( vfs_inode_t  * inode,
     981                              vfs_dentry_t * dentry,
     982                              uint32_t       size );
    951983
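
The two FS-dispatch functions documented above (vfs_fs_new_dentry / vfs_fs_update_dentry) are meant to be wrapped by the caller in the local-versus-RPC idiom already visible in the vfs.c hunks of this changeset; a condensed reminder, using the identifiers of the vfs_close() hunk:

    if( parent_cxy == local_cxy )          // parent inode is local : direct call
    {
        error = vfs_fs_update_dentry( parent_inode_ptr , parent_dentry_ptr , size );
    }
    else                                   // parent inode is remote : use the RPC
    {
        rpc_vfs_fs_update_dentry_client( parent_cxy , parent_inode_ptr ,
                                         parent_dentry_ptr , size , &error );
    }
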
    952984/******************************************************************************************
  • trunk/kernel/kern/kernel_init.c

    r619 r623  
    33 *
    44 * Authors :  Mohamed Lamine Karaoui (2015)
    5  *            Alain Greiner  (2016,2017,2018)
     5 *            Alain Greiner  (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) Sorbonne Universites
     
    113113cxy_t                local_cxy                               CONFIG_CACHE_LINE_ALIGNED;
    114114
    115 // This variable is used for CP0 cores synchronisation in kernel_init()
      115// This variable is used for core[0] synchronisation across clusters in kernel_init()
    116116__attribute__((section(".kdata")))
    117117xbarrier_t           global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
     
    126126
    127127// kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld
    128 // It is used by the bootloader.
      128// It is used by the bootloader to transfer control to the kernel.
    129129extern void kernel_init( boot_info_t * info );
    130130
     
    466466// These chdev descriptors are distributed on all clusters, using a modulo on a global
    467467// index, identically computed in all clusters.
    468 // This function is executed in all clusters by the CP0 core, that computes a global index
    469 // for all external chdevs. Each CP0 core creates only the chdevs that must be placed in
      468// This function is executed in all clusters by core[0], which computes a global index
      469// for all external chdevs. Each core[0] creates only the chdevs that must be placed in
    470470// the local cluster, because the global index matches the local index.
    471471// The relevant entries in all copies of the devices directory are initialised.
     
    626626
    627627///////////////////////////////////////////////////////////////////////////////////////////
    628 // This function is called by CP0 in cluster 0 to allocate memory and initialize the PIC
     628// This function is called by core[0] in cluster 0 to allocate memory and initialize the PIC
    629629// device, namely the informations attached to the external IOPIC controller, that
    630630// must be replicated in all clusters (struct iopic_input).
     
    791791
    792792///////////////////////////////////////////////////////////////////////////////////////////
    793 // This function is called by all CP0s in all cluster to complete the PIC device
      793// This function is called by all core[0]s in all clusters to complete the PIC device
    794794// initialisation, namely the informations attached to the LAPIC controller.
    795795// This initialisation must be done after the IOPIC initialisation, but before other
     
    899899///////////////////////////////////////////////////////////////////////////////////////////
    900900// This function is the entry point for the kernel initialisation.
    901 // It is executed by all cores in all clusters, but only core[0], called CP0,
    902 // initializes the shared resources such as the cluster manager, or the local peripherals.
     901// It is executed by all cores in all clusters, but only core[0] initializes
     902// the shared resources such as the cluster manager, or the local peripherals.
    903903// To comply with the multi-kernels paradigm, it accesses only local cluster memory, using
    904904// only information contained in the local boot_info_t structure, set by the bootloader.
    905 // Only CP0 in cluster 0 print the log messages.
      905// Only core[0] in cluster 0 prints the log messages.
    906906///////////////////////////////////////////////////////////////////////////////////////////
    907907// @ info    : pointer on the local boot-info structure.
     
    925925
    926926    /////////////////////////////////////////////////////////////////////////////////
    927     // STEP 0 : Each core get its core identifier from boot_info, and makes
      927    // STEP 1 : Each core gets its core identifier from boot_info, and makes
    928928    //          a partial initialisation of its private idle thread descriptor.
    929     //          CP0 initializes the "local_cxy" global variable.
    930     //          CP0 in cluster IO initializes the TXT0 chdev to print log messages.
     929    //          core[0] initializes the "local_cxy" global variable.
     930    //          core[0] in cluster[0] initializes the TXT0 chdev for log messages.
    931931    /////////////////////////////////////////////////////////////////////////////////
    932932
     
    936936                                  &core_gid );
    937937
    938     // all CP0s initialize cluster identifier
      938    // core[0] initializes the cluster identifier
    939939    if( core_lid == 0 ) local_cxy = info->cxy;
    940940
     
    956956#endif
    957957
    958     // all CP0s initialize cluster info
     958    // core[0] initializes cluster info
    959959    if( core_lid == 0 ) cluster_info_init( info );
    960960
    961     // CP0 in cluster 0 initialises TXT0 chdev descriptor
     961    // core[0] in cluster[0] initialises TXT0 chdev descriptor
    962962    if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info );
     963
     964    // all cores check identifiers
     965    if( error )
     966    {
     967        printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
      968        __FUNCTION__, core_gid, core_cxy, core_lid );
     969        hal_core_sleep();
     970    }
    963971
    964972    /////////////////////////////////////////////////////////////////////////////////
     
    970978#if DEBUG_KERNEL_INIT
    971979if( (core_lid ==  0) & (local_cxy == 0) )
    972 printk("\n[%s] : exit barrier 0 : TXT0 initialized / cycle %d\n",
     980printk("\n[%s] : exit barrier 1 : TXT0 initialized / cycle %d\n",
    973981__FUNCTION__, (uint32_t)hal_get_cycles() );
    974982#endif
    975983
    976     /////////////////////////////////////////////////////////////////////////////
    977     // STEP 1 : all cores check core identifier.
    978     //          CP0 initializes the local cluster manager.
    979     //          This includes the memory allocators.
    980     /////////////////////////////////////////////////////////////////////////////
    981 
    982     // all cores check identifiers
    983     if( error )
    984     {
    985         printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
    986         __FUNCTION__, core_lid, core_cxy, core_lid );
    987         hal_core_sleep();
    988     }
    989 
    990     // all CP0s initialise DQDT (only CPO in cluster 0 build the quad-tree)
     984    /////////////////////////////////////////////////////////////////////////////////
      985    // STEP 2 : core[0] initializes the cluster manager,
     986    //          including the physical memory allocator.
     987    /////////////////////////////////////////////////////////////////////////////////
     988
      989    // core[0] initialises DQDT (only core[0] in cluster 0 builds the quad-tree)
    991990    if( core_lid == 0 ) dqdt_init();
    992991   
    993     // all CP0s initialize other cluster manager complex structures
      992    // core[0] initializes other cluster manager complex structures
    994993    if( core_lid == 0 )
    995994    {
     
    10121011#if DEBUG_KERNEL_INIT
    10131012if( (core_lid ==  0) & (local_cxy == 0) )
    1014 printk("\n[%s] : exit barrier 1 : clusters initialised / cycle %d\n",
     1013printk("\n[%s] : exit barrier 2 : cluster manager initialized / cycle %d\n",
    10151014__FUNCTION__, (uint32_t)hal_get_cycles() );
    10161015#endif
    10171016
    10181017    /////////////////////////////////////////////////////////////////////////////////
    1019     // STEP 2 : CP0 initializes the process_zero descriptor.
    1020     //          CP0 in cluster 0 initializes the IOPIC device.
     1018    // STEP 3 : core[0] initializes the process_zero descriptor,
     1019    //          including the kernel VMM (both GPT and VSL)
    10211020    /////////////////////////////////////////////////////////////////////////////////
    10221021
     
    10251024    core    = &cluster->core_tbl[core_lid];
    10261025
    1027     // all CP0s initialize the process_zero descriptor
    1028     if( core_lid == 0 ) process_zero_create( &process_zero );
    1029 
    1030     // CP0 in cluster 0 initializes the PIC chdev,
     1026    // core[0] initializes the process_zero descriptor,
     1027    if( core_lid == 0 ) process_zero_create( &process_zero , info );
     1028
     1029    /////////////////////////////////////////////////////////////////////////////////
     1030    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1031                                        (info->x_size * info->y_size) );
     1032    barrier_wait( &local_barrier , info->cores_nr );
     1033    /////////////////////////////////////////////////////////////////////////////////
     1034
     1035#if DEBUG_KERNEL_INIT
     1036if( (core_lid ==  0) & (local_cxy == 0) )
      1037printk("\n[%s] : exit barrier 3 : kernel process initialized / cycle %d\n",
     1038__FUNCTION__, (uint32_t)hal_get_cycles() );
     1039#endif
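
The barrier just above is an instance of the two-level synchronisation pattern that closes every STEP of kernel_init(): core[0] of each cluster crosses a global barrier (one slot per cluster), then all cores of the local cluster cross the local barrier. The lines below only reproduce the idiom visible in the hunks to make it explicit; they are not new code:

    // core[0] of each cluster waits on the global barrier (one slot per cluster)
    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
                                       (info->x_size * info->y_size) );

    // then all cores of the local cluster wait on the local barrier
    barrier_wait( &local_barrier , info->cores_nr );
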
     1040
     1041    /////////////////////////////////////////////////////////////////////////////////
     1042    // STEP 4 : all cores initialize their private MMU
     1043    //          core[0] in cluster 0 initializes the IOPIC device.
     1044    /////////////////////////////////////////////////////////////////////////////////
     1045
     1046    // all cores initialise their MMU
     1047    hal_mmu_init( &process_zero.vmm.gpt );
     1048
     1049    // core[0] in cluster[0] initializes the PIC chdev,
    10311050    if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info );
    10321051   
     
    10391058#if DEBUG_KERNEL_INIT
    10401059if( (core_lid ==  0) & (local_cxy == 0) )
    1041 printk("\n[%s] : exit barrier 2 : PIC initialised / cycle %d\n",
     1060printk("\n[%s] : exit barrier 4 : MMU and IOPIC initialized / cycle %d\n",
    10421061__FUNCTION__, (uint32_t)hal_get_cycles() );
    10431062#endif
    10441063
    10451064    ////////////////////////////////////////////////////////////////////////////////
    1046     // STEP 3 : CP0 initializes the distibuted LAPIC descriptor.
    1047     //          CP0 initializes the internal chdev descriptors
    1048     //          CP0 initialize the local external chdev descriptors
      1065    // STEP 5 : core[0] initializes the distributed LAPIC descriptor.
      1066    //          core[0] initializes the internal chdev descriptors
      1067    //          core[0] initializes the local external chdev descriptors
    10491068    ////////////////////////////////////////////////////////////////////////////////
    10501069
    1051     // all CP0s initialize their local LAPIC extension,
     1070    // all core[0]s initialize their local LAPIC extension,
    10521071    if( core_lid == 0 ) lapic_init( info );
    10531072
    1054     // CP0 scan the internal (private) peripherals,
      1073    // core[0] scans the internal (private) peripherals,
    10551074    // and allocates memory for the corresponding chdev descriptors.
    10561075    if( core_lid == 0 ) internal_devices_init( info );
    10571076       
    10581077
    1059     // All CP0s contribute to initialise external peripheral chdev descriptors.
    1060     // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0),
     1078    // All core[0]s contribute to initialise external peripheral chdev descriptors.
      1079    // Each core[0][cxy] scans the set of external (shared) peripherals (except TXT0),
    10611080    // and allocates memory for the chdev descriptors that must be placed
    10621081    // on the (cxy) cluster according to the global index value.
     
    10721091#if DEBUG_KERNEL_INIT
    10731092if( (core_lid ==  0) & (local_cxy == 0) )
    1074 printk("\n[%s] : exit barrier 3 : all chdevs initialised / cycle %d\n",
     1093printk("\n[%s] : exit barrier 5 : all chdevs initialised / cycle %d\n",
    10751094__FUNCTION__, (uint32_t)hal_get_cycles() );
    10761095#endif
     
    10821101   
    10831102    /////////////////////////////////////////////////////////////////////////////////
    1084     // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
      1103    // STEP 6 : All cores enable IPI (Inter Processor Interrupt),
     10851104    //          All cores initialize the IDLE thread.
    1086     //          Only CP0 in cluster 0 creates the VFS root inode.
     1105    //          Only core[0] in cluster[0] creates the VFS root inode.
     10871106    //          It accesses the boot device to initialize the file system context.
    10881107    /////////////////////////////////////////////////////////////////////////////////
     
    11071126#endif
    11081127
    1109     // CPO in cluster 0 creates the VFS root
      1128    // core[0] in cluster[0] creates the VFS root
    11101129    if( (core_lid ==  0) && (local_cxy == 0 ) )
    11111130    {
     
    11371156            // 4. create VFS root inode in cluster 0
    11381157            error = vfs_inode_create( FS_TYPE_FATFS,                       // fs_type
    1139                                       INODE_TYPE_DIR,                      // inode_type
    11401158                                      0,                                   // attr
    11411159                                      0,                                   // rights
     
    11501168            }
    11511169
    1152             // 5. update FATFS root inode extension 
     1170            // 5. update FATFS root inode "type" and "extend" fields 
    11531171            cxy_t         vfs_root_cxy = GET_CXY( vfs_root_inode_xp );
    11541172            vfs_inode_t * vfs_root_ptr = GET_PTR( vfs_root_inode_xp );
      1173            hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->type ) , INODE_TYPE_DIR );
    11551174            hal_remote_spt( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ),
    11561175                            (void*)(intptr_t)root_dir_cluster );
     
    11891208#if DEBUG_KERNEL_INIT
    11901209if( (core_lid ==  0) & (local_cxy == 0) )
    1191 printk("\n[%s] : exit barrier 4 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
     1210printk("\n[%s] : exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
    11921211__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    11931212GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    11951214
    11961215    /////////////////////////////////////////////////////////////////////////////////
    1197     // STEP 5 : Other CP0s allocate memory for the selected FS context,
    1198     //          and initialise both the local FS context and the local VFS context
    1199     //          from values stored in cluster 0.
      1216    // STEP 7 : In all clusters other than cluster[0], core[0] allocates memory
      1217    //          for the selected FS context, and initialises the local FS context and
      1218    //          the local VFS context from values stored in cluster 0.
    12001219    //          They get the VFS root inode extended pointer from cluster 0.
    12011220    /////////////////////////////////////////////////////////////////////////////////
     
    12591278#if DEBUG_KERNEL_INIT
    12601279if( (core_lid ==  0) & (local_cxy == 1) )
    1261 printk("\n[%s] : exit barrier 5 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
     1280printk("\n[%s] : exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
    12621281__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    12631282GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    12651284
    12661285    /////////////////////////////////////////////////////////////////////////////////
    1267     // STEP 6 : CP0 in cluster 0 makes the global DEVFS tree initialisation:
      1286    // STEP 8 : core[0] in cluster 0 performs the global DEVFS initialisation:
    12681287    //          It initializes the DEVFS context, and creates the DEVFS
    12691288    //          "dev" and "external" inodes in cluster 0.
     
    13091328#if DEBUG_KERNEL_INIT
    13101329if( (core_lid ==  0) & (local_cxy == 0) )
    1311 printk("\n[%s] : exit barrier 6 : DEVFS root initialized in cluster 0 / cycle %d\n",
     1330printk("\n[%s] : exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n",
    13121331__FUNCTION__, (uint32_t)hal_get_cycles() );
    13131332#endif
    13141333
    13151334    /////////////////////////////////////////////////////////////////////////////////
    1316     // STEP 7 : All CP0s complete in parallel the DEVFS tree initialization.
    1317     //          Each CP0 get the "dev" and "external" extended pointers from
     1335    // STEP 9 : All core[0]s complete in parallel the DEVFS initialization.
      1336    //          Each core[0] gets the "dev" and "external" extended pointers from
    13181337    //          values stored in cluster 0.
    1319     //          Then each CP0 in cluster(i) creates the DEVFS "internal" directory,
     1338    //          Then each core[0] in cluster(i) creates the DEVFS "internal" directory,
    13201339    //          and creates the pseudo-files for all chdevs in cluster (i).
    13211340    /////////////////////////////////////////////////////////////////////////////////
     
    13461365#if DEBUG_KERNEL_INIT
    13471366if( (core_lid ==  0) & (local_cxy == 0) )
    1348 printk("\n[%s] : exit barrier 7 : DEV initialized in cluster 0 / cycle %d\n",
     1367printk("\n[%s] : exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n",
    13491368__FUNCTION__, (uint32_t)hal_get_cycles() );
    13501369#endif
    13511370
    1352     /////////////////////////////////////////////////////////////////////////////////
    1353     // STEP 8 : CP0 in cluster 0 creates the first user process (process_init)
     1371#if( DEBUG_KERNEL_INIT & 1 )
     1372if( (core_lid ==  0) & (local_cxy == 0) )
     1373vfs_display( vfs_root_inode_xp );
     1374#endif
     1375
     1376    /////////////////////////////////////////////////////////////////////////////////
     1377    // STEP 10 : core[0] in cluster 0 creates the first user process (process_init).
      1378    //           This includes the first user process VMM (GPT and VSL) creation.
     1379    //           Finally, it prints the ALMOS-MKH banner.
    13541380    /////////////////////////////////////////////////////////////////////////////////
    13551381
    13561382    if( (core_lid == 0) && (local_cxy == 0) )
    13571383    {
    1358 
    1359 #if( DEBUG_KERNEL_INIT & 1 )
    1360 vfs_display( vfs_root_inode_xp );
    1361 #endif
    1362 
    13631384       process_init_create();
    13641385    }
    1365 
    1366     /////////////////////////////////////////////////////////////////////////////////
    1367     if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
    1368                                         (info->x_size * info->y_size) );
    1369     barrier_wait( &local_barrier , info->cores_nr );
    1370     /////////////////////////////////////////////////////////////////////////////////
    1371 
    1372 #if DEBUG_KERNEL_INIT
    1373 if( (core_lid ==  0) & (local_cxy == 0) )
    1374 printk("\n[%s] : exit barrier 8 : process init created / cycle %d\n",
    1375 __FUNCTION__, (uint32_t)hal_get_cycles() );
    1376 #endif
    13771386
    13781387#if (DEBUG_KERNEL_INIT & 1)
     
    13811390#endif
    13821391
    1383     /////////////////////////////////////////////////////////////////////////////////
    1384     // STEP 9 : CP0 in cluster 0 print banner
    1385     /////////////////////////////////////////////////////////////////////////////////
    1386    
    13871392    if( (core_lid == 0) && (local_cxy == 0) )
    13881393    {
    13891394        print_banner( (info->x_size * info->y_size) , info->cores_nr );
     1395    }
    13901396
    13911397#if( DEBUG_KERNEL_INIT & 1 )
     1398if( (core_lid ==  0) & (local_cxy == 0) )
     13921399printk("\n\n***** memory footprint for main kernel objects\n\n"
    13931400                   " - thread descriptor  : %d bytes\n"
     
    14371444#endif
    14381445
    1439     }
      1446    // each core updates the register(s) defining the kernel
     1447    // entry points for interrupts, exceptions and syscalls...
     1448    hal_set_kentry();
    14401449
    14411450    // each core activates its private TICK IRQ
     
    14481457    /////////////////////////////////////////////////////////////////////////////////
    14491458
    1450 #if DEBUG_KERNEL_INIT
     1459#if( DEBUG_KERNEL_INIT & 1 )
    14511460thread_t * this = CURRENT_THREAD;
    14521461printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
  • trunk/kernel/kern/printk.c

    r583 r623  
    4848
    4949    va_list    args;      // printf arguments
    50     uint32_t   ps;        // write pointer to the string buffer
     50    uint32_t   ps;        // pointer to the string buffer
    5151
    5252    ps = 0;   
     
    5757    while ( *format != 0 )
    5858    {
    59 
    6059        if (*format == '%')   // copy argument to string
    6160        {
     
    9897                break;
    9998            }
    100             case ('d'):             // decimal signed integer
      99            case ('b'):             // exactly 2 digits hexadecimal integer
     100            {
     101                int  val = va_arg( args, int );
     102                int  val_lsb = val & 0xF;
     103                int  val_msb = (val >> 4) & 0xF;
     104                buf[0] = HexaTab[val_msb];
     105                buf[1] = HexaTab[val_lsb];
     106                len  = 2;
     107                pbuf = buf;
     108                break;
     109            }
     110            case ('d'):             // up to 10 digits decimal signed integer
    101111            {
    102112                int val = va_arg( args, int );
     
    108118                for(i = 0; i < 10; i++)
    109119                {
    110 
    111120                    buf[9 - i] = HexaTab[val % 10];
    112121                    if (!(val /= 10)) break;
     
    116125                break;
    117126            }
    118             case ('u'):             // decimal unsigned integer
     127            case ('u'):             // up to 10 digits decimal unsigned integer
    119128            {
    120129                uint32_t val = va_arg( args, uint32_t );
     
    128137                break;
    129138            }
    130             case ('x'):             // 32 bits hexadecimal
    131             case ('l'):             // 64 bits hexadecimal
     139            case ('x'):             // up to 8 digits hexadecimal
     140            case ('l'):             // up to 16 digits hexadecimal
    132141            {
    133142                uint32_t imax;
     
    157166                break;
    158167            }
    159             case ('X'):             // 32 bits hexadecimal on 8 characters
     168            case ('X'):             // exactly 8 digits hexadecimal
    160169            {
    161170                uint32_t val = va_arg( args , uint32_t );
     
    238247            case ('c'):             /* char conversion */
    239248            {
    240                 int val = va_arg( *args , int );
     249                int  val = va_arg( *args , int );
    241250                len = 1;
    242                 buf[0] = val;
     251                buf[0] = (char)val;
    243252                pbuf = &buf[0];
    244253                break;
    245254            }
    246             case ('d'):             /* 32 bits decimal signed  */
      255            case ('b'):             // exactly 2 digits hexadecimal
     256            {
     257                int  val = va_arg( *args, int );
     258                int  val_lsb = val & 0xF;
     259                int  val_msb = (val >> 4) & 0xF;
     260                buf[0] = HexaTab[val_msb];
     261                buf[1] = HexaTab[val_lsb];
     262                len  = 2;
     263                pbuf = buf;
     264                break;
     265            }
     266            case ('d'):             /* up to 10 digits signed decimal */
    247267            {
    248268                int val = va_arg( *args , int );
     
    261281                break;
    262282            }
    263             case ('u'):             /* 32 bits decimal unsigned */
     283            case ('u'):             /* up to 10 digits unsigned decimal */
    264284            {
    265285                uint32_t val = va_arg( *args , uint32_t );
     
    273293                break;
    274294            }
    275             case ('x'):             /* 32 bits hexadecimal unsigned */
     295            case ('x'):             /* up to 8 digits hexadecimal */
    276296            {
    277297                uint32_t val = va_arg( *args , uint32_t );
     
    286306                break;
    287307            }
    288             case ('X'):             /* 32 bits hexadecimal unsigned  on 10 char */
     308            case ('X'):             /* exactly 8 digits hexadecimal */
    289309            {
    290310                uint32_t val = va_arg( *args , uint32_t );
     
    299319                break;
    300320            }
    301             case ('l'):            /* 64 bits hexadecimal unsigned */
    302             {
    303                 unsigned long long val = va_arg( *args , unsigned long long );
     321            case ('l'):            /* up to 16 digits hexadecimal */
     322            {
     323                uint64_t val = va_arg( *args , uint64_t );
    304324                dev_txt_sync_write( "0x" , 2 );
    305325                for(i = 0; i < 16; i++)
     
    312332                break;
    313333            }
    314             case ('L'):           /* 64 bits hexadecimal unsigned on 18 char */
    315             {
    316                 unsigned long long val = va_arg( *args , unsigned long long );
     334            case ('L'):           /* exactly 16 digits hexadecimal */
     335            {
     336                uint64_t val = va_arg( *args , uint64_t );
    317337                dev_txt_sync_write( "0x" , 2 );
    318338                for(i = 0; i < 16; i++)
     
    525545}
    526546
     547/////////////////////////////
     548void putb( char     * string,
     549           uint8_t  * buffer,
     550           uint32_t   size )
     551{
     552    uint32_t line;
     553    uint32_t byte = 0;
     554
     555    // get pointers on TXT0 chdev
     556    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     557    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     558    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     559
     560    // get extended pointer on remote TXT0 chdev lock
     561    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     562
     563    // get TXT0 lock
     564    remote_busylock_acquire( lock_xp );
     565
     566    // display string on TTY0
     567    nolock_printk("\n***** %s *****\n", string );
     568
     569    for ( line = 0 ; line < (size>>4) ; line++ )
     570    {
     571         nolock_printk(" %X | %b %b %b %b | %b %b %b %b | %b %b %b %b | %b %b %b %b \n",
     572         byte,
     573         buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
     574         buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
     575         buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
     576         buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
     577
     578         byte += 16;
     579    }
     580
     581    // release TXT0 lock
     582    remote_busylock_release( lock_xp );
     583}
     584
     585
    527586
    528587// Local Variables:
  • trunk/kernel/kern/printk.h

    r583 r623  
    2424///////////////////////////////////////////////////////////////////////////////////
    2525// The printk.c and printk.h files define the functions used by the kernel
    26 // to display messages on a text terminal.
    27 // Two access modes are supported:
    28 // - The printk() function displays kernel messages on the kernel terminal TXT0,
    29 //   using a busy waiting policy: It calls directly the relevant TXT driver,
    30 //   after taking the TXT0 busylock for exclusive access to the TXT0 terminal.
    31 // - The user_printk() function displays messages on the calling thread private
    32 //   terminal, using a descheduling policy: it register the request in the selected
    33 //   TXT chdev waiting queue and deschedule. The calling thread is reactivated by
    34 //   the IRQ signalling completion.
    35 // Both functions use the generic TXT device to call the proper implementation
    36 // dependant TXT driver.
    37 // Finally these files define a set of conditional trace <***_dmsg> for debug.
     26// to display messages on the kernel terminal TXT0, using a busy waiting policy.
      27// It synchronously calls the TXT0 driver, without descheduling.
    3828///////////////////////////////////////////////////////////////////////////////////
    3929
     
    4434#include <stdarg.h>
    4535
    46 #include <hal_special.h> // hal_get_cycles()
     36#include <hal_special.h>
    4737
    4838/**********************************************************************************
     4939 * This function builds a formatted string.
    5040 * The supported formats are defined below :
    51  *   %c : single character
    52  *   %d : signed decimal 32 bits integer
    53  *   %u : unsigned decimal 32 bits integer
    54  *   %x : hexadecimal 32 bits integer
    55  *   %l : hexadecimal 64 bits integer
     41 *   %b : exactly 2 digits hexadecimal integer (8 bits)
      42 *   %c : single ASCII character (8 bits)
      43 *   %d : up to 10 digits signed decimal integer (32 bits)
     44 *   %u : up to 10 digits unsigned decimal (32 bits)
     45 *   %x : up to 8 digits hexadecimal integer (32 bits)
     46 *   %X : exactly 8 digits hexadecimal integer (32 bits)
     47 *   %l : up to 16 digits hexadecimal integer (64 bits)
     48 *   %L : exactly 16 digits hexadecimal integer (64 bits)
    5649 *   %s : NUL terminated character string
    5750 **********************************************************************************
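
To illustrate the widths listed above, here is a hedged sketch of a kernel debug message using both the variable-width and fixed-width specifiers (the printed values are arbitrary, and the surrounding function is illustrative only):

    #include <printk.h>

    static void debug_print_widths( uint32_t word , uint64_t paddr )
    {
        // %x prints up to 8 hexadecimal digits, %X always prints 8;
        // %l prints up to 16 hexadecimal digits, %L always prints 16
        printk("word = %x / word = %X / paddr = %l / paddr = %L\n",
               word , word , paddr , paddr );
    }
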
     
    153146void putl( uint64_t val );
    154147
     148/**********************************************************************************
     149 * This debug function displays on the kernel TXT0 terminal the content of an
     150 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line).
     151 * The <string> argument is displayed before the buffer content.
      152 * The line format is an address followed by 16 hexadecimal bytes.
     153 **********************************************************************************
     154 * @ string   : buffer name or identifier.
      155 * @ buffer   : local pointer on the byte array.
      156 * @ size     : number of bytes to display.
     157 *********************************************************************************/
     158void putb( char     * string,
     159           uint8_t  * buffer,
     160           uint32_t   size );
     161
     162
    155163
    156164#endif  // _PRINTK_H
  • trunk/kernel/kern/process.c

    r619 r623  
    2929#include <hal_uspace.h>
    3030#include <hal_irqmask.h>
     31#include <hal_vmm.h>
    3132#include <errno.h>
    3233#include <printk.h>
     
    486487    }
    487488
    488     // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG]
     489    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
     490
    489491    // FIXME close all open files [AG]
     492
    490493    // FIXME synchronize dirty files [AG]
    491494
     
    14871490        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
    14881491        vfs_close( file_xp , file_id );
    1489         // FIXME restore old process VMM
     1492        // FIXME restore old process VMM [AG]
    14901493        return -1;
    14911494    }
     
    15051508                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
    15061509        vfs_close( file_xp , file_id );
    1507         // FIXME restore old process VMM
     1510        // FIXME restore old process VMM [AG]
    15081511        return -1;
    15091512        }
     
    15351538
    15361539
    1537 ///////////////////////////////////////////////
    1538 void process_zero_create( process_t * process )
     1540////////////////////////////////////////////////
     1541void process_zero_create( process_t   * process,
     1542                          boot_info_t * info )
    15391543{
    15401544    error_t error;
     
    15661570    process->parent_xp  = XPTR( local_cxy , process );
    15671571    process->term_state = 0;
     1572
     1573    // initialise kernel GPT and VSL, depending on architecture
     1574    hal_vmm_kernel_init( info );
    15681575
    15691576    // reset th_tbl[] array and associated fields
  • trunk/kernel/kern/process.h

    r618 r623  
    7373 * is always stored in the same cluster as the inode associated to the file.
    7474 * A free entry in this array contains the XPTR_NULL value.
    75  * The array size is defined by a the CONFIG_PROCESS_FILE_MAX_NR parameter.
     75 * The array size is defined by the CONFIG_PROCESS_FILE_MAX_NR parameter.
    7676 *
    7777 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open
     
    7979 *       - the fd_array[] in a process copy is simply a cache containing a subset of the
    8080 *         open files to speed the fdid to xptr translation, but the "lock" and "current
    81  *         fields should not be used.
     81 *         fields are not used.
    8282 *       - all modifications made by the process_fd_remove() are done in reference cluster
    8383 *         and reported in all process_copies.
     
    200200
    201201/*********************************************************************************************
    202  * This function initialize, in each cluster, the kernel "process_zero", that is the owner
    203  * of all kernel threads in a given cluster. It is called by the kernel_init() function.
      202 * This function initializes, in each cluster, the kernel "process_zero", which contains
      203 * all kernel threads of a given cluster. It is called by the kernel_init() function.
    204204 * The process_zero descriptor is allocated as a global variable in file kernel_init.c
    205205 * Both the PID and PPID fields are set to zero, the ref_xp is the local process_zero,
    206206 * and the parent process is set to XPTR_NULL. The th_tbl[] is initialized as empty.
    207  *********************************************************************************************
    208  * @ process      : [in] pointer on local process descriptor to initialize.
    209  ********************************************************************************************/
    210 void process_zero_create( process_t * process );
     207 * The process GPT is initialised as required by the target architecture.
     208 * The "kcode" and "kdata" segments are registered in the process VSL.
     209 *********************************************************************************************
     210 * @ process  : [in] pointer on process descriptor to initialize.
     211 * @ info     : pointer on local boot_info_t (for kernel segments base and size).
     212 ********************************************************************************************/
     213void process_zero_create( process_t   * process,
     214                          boot_info_t * info );
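
A minimal sketch of how this updated prototype might be called from kernel_init(), assuming (as stated above) the global process_zero descriptor allocated in kernel_init.c and a local boot_info_t structure named boot_info; the boot_info variable name and the wrapper function are assumptions, not taken from this changeset:

    extern process_t    process_zero;     // global kernel process descriptor (kernel_init.c)
    extern boot_info_t  boot_info;        // assumed name for the local boot_info_t copy

    static void kernel_init_sketch( void )
    {
        // initialise the local kernel process, its GPT, and the kcode/kdata vsegs
        process_zero_create( &process_zero , &boot_info );
    }
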
    211215
    212216/*********************************************************************************************
     
    428432 * identified by the <process_xp> argument, register the <file_xp> argument in the
    429433 * allocated slot, and return the slot index in the <fdid> buffer.
    430  * It can be called by any thread in any cluster, because it uses portable remote access
     434 * It can be called by any thread in any cluster, because it uses remote access
    431435 * primitives to access the reference process descriptor.
    432436 * It takes the lock protecting the reference fd_array against concurrent accesses.
  • trunk/kernel/kern/rpc.c

    r619 r623  
    22 * rpc.c - RPC operations implementation.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    5858    &rpc_thread_user_create_server,        // 6
    5959    &rpc_thread_kernel_create_server,      // 7
    60     &rpc_undefined,                        // 8    unused slot       
     60    &rpc_vfs_fs_update_dentry_server,      // 8
    6161    &rpc_process_sigaction_server,         // 9
    6262
     
    6767    &rpc_vfs_file_create_server,           // 14
    6868    &rpc_vfs_file_destroy_server,          // 15
    69     &rpc_vfs_fs_get_dentry_server,         // 16
     69    &rpc_vfs_fs_new_dentry_server,         // 16
    7070    &rpc_vfs_fs_add_dentry_server,         // 17
    7171    &rpc_vfs_fs_remove_dentry_server,      // 18
     
    7676    &rpc_kcm_alloc_server,                 // 22
    7777    &rpc_kcm_free_server,                  // 23
    78     &rpc_undefined,                        // 24   unused slot
     78    &rpc_mapper_sync_server,               // 24
    7979    &rpc_mapper_handle_miss_server,        // 25
    8080    &rpc_vmm_delete_vseg_server,           // 26
     
    9494    "THREAD_USER_CREATE",        // 6
    9595    "THREAD_KERNEL_CREATE",      // 7
    96     "undefined",                 // 8
     96    "VFS_FS_UPDATE_DENTRY",      // 8
    9797    "PROCESS_SIGACTION",         // 9
    9898
     
    112112    "KCM_ALLOC",                 // 22
    113113    "KCM_FREE",                  // 23
    114     "undefined",                 // 24
     114    "MAPPER_SYNC",               // 24
    115115    "MAPPER_HANDLE_MISS",        // 25
    116116    "VMM_DELETE_VSEG",           // 26
     
    921921
    922922/////////////////////////////////////////////////////////////////////////////////////////
    923 // [7]      Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
     923// [7]      Marshaling functions attached to RPC_THREAD_KERNEL_CREATE
    924924/////////////////////////////////////////////////////////////////////////////////////////
    925925
     
    10131013
    10141014/////////////////////////////////////////////////////////////////////////////////////////
    1015 // [8]   undefined slot
    1016 /////////////////////////////////////////////////////////////////////////////////////////
    1017 
      1015// [8]   Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY
     1016/////////////////////////////////////////////////////////////////////////////////////////
     1017
     1018/////////////////////////////////////////////////////////
     1019void rpc_vfs_fs_update_dentry_client( cxy_t          cxy,
     1020                                      vfs_inode_t  * inode,
     1021                                      vfs_dentry_t * dentry,
     1022                                      uint32_t       size,
     1023                                      error_t      * error )
     1024{
     1025#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1026thread_t * this = CURRENT_THREAD;
     1027uint32_t cycle = (uint32_t)hal_get_cycles();
     1028if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1029printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     1030__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1031#endif
     1032
     1033    uint32_t responses = 1;
     1034
     1035    // initialise RPC descriptor header
     1036    rpc_desc_t  rpc;
     1037    rpc.index    = RPC_VFS_FS_UPDATE_DENTRY;
     1038    rpc.blocking = true;
     1039    rpc.rsp      = &responses;
     1040
     1041    // set input arguments in RPC descriptor
     1042    rpc.args[0] = (uint64_t)(intptr_t)inode;
     1043    rpc.args[1] = (uint64_t)(intptr_t)dentry;
     1044    rpc.args[2] = (uint64_t)size;
     1045
     1046    // register RPC request in remote RPC fifo
     1047    rpc_send( cxy , &rpc );
     1048
     1049    // get output values from RPC descriptor
     1050    *error   = (error_t)rpc.args[3];
     1051
     1052#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1053cycle = (uint32_t)hal_get_cycles();
     1054if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1055printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     1056__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1057#endif
     1058}
     1059
     1060/////////////////////////////////////////////////
     1061void rpc_vfs_fs_update_dentry_server( xptr_t xp )
     1062{
     1063#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1064thread_t * this = CURRENT_THREAD;
     1065uint32_t cycle = (uint32_t)hal_get_cycles();
     1066if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1067printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     1068__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1069#endif
     1070
     1071    error_t        error;
     1072    vfs_inode_t  * inode;
     1073    vfs_dentry_t * dentry;
     1074    uint32_t       size;
     1075
     1076    // get client cluster identifier and pointer on RPC descriptor
     1077    cxy_t        client_cxy  = GET_CXY( xp );
     1078    rpc_desc_t * desc        = GET_PTR( xp );
     1079
     1080    // get input arguments
     1081    inode  = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     1082    dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
     1083    size   = (uint32_t)               hal_remote_l64(XPTR(client_cxy , &desc->args[2]));
     1084
     1085    // call the kernel function
     1086    error = vfs_fs_update_dentry( inode , dentry , size );
     1087
     1088    // set output argument
     1089    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1090
     1091#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1092cycle = (uint32_t)hal_get_cycles();
     1093if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1094printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     1095__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1096#endif
     1097}
    10181098
    10191099/////////////////////////////////////////////////////////////////////////////////////////
     
    11101190void rpc_vfs_inode_create_client( cxy_t          cxy,     
    11111191                                  uint32_t       fs_type,    // in
    1112                                   uint32_t       inode_type, // in
    11131192                                  uint32_t       attr,       // in
    11141193                                  uint32_t       rights,     // in
     
    11361215    // set input arguments in RPC descriptor
    11371216    rpc.args[0] = (uint64_t)fs_type;
    1138     rpc.args[1] = (uint64_t)inode_type;
    1139     rpc.args[2] = (uint64_t)attr;
    1140     rpc.args[3] = (uint64_t)rights;
    1141     rpc.args[4] = (uint64_t)uid;
    1142     rpc.args[5] = (uint64_t)gid;
     1217    rpc.args[1] = (uint64_t)attr;
     1218    rpc.args[2] = (uint64_t)rights;
     1219    rpc.args[3] = (uint64_t)uid;
     1220    rpc.args[4] = (uint64_t)gid;
    11431221
    11441222    // register RPC request in remote RPC fifo
     
    11461224
    11471225    // get output values from RPC descriptor
    1148     *inode_xp = (xptr_t)rpc.args[6];
    1149     *error    = (error_t)rpc.args[7];
     1226    *inode_xp = (xptr_t)rpc.args[5];
     1227    *error    = (error_t)rpc.args[6];
    11501228
    11511229#if DEBUG_RPC_VFS_INODE_CREATE
     
    11691247
    11701248    uint32_t         fs_type;
    1171     uint32_t         inode_type;
    11721249    uint32_t         attr;
    11731250    uint32_t         rights;
     
    11831260    // get input arguments from client rpc descriptor
    11841261    fs_type    = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    1185     inode_type = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    1186     attr       = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    1187     rights     = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
    1188     uid        = (uid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    1189     gid        = (gid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) );
     1262    attr       = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     1263    rights     = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     1264    uid        = (uid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
     1265    gid        = (gid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    11901266
    11911267    // call local kernel function
    11921268    error = vfs_inode_create( fs_type,
    1193                               inode_type,
    11941269                              attr,
    11951270                              rights,
     
    11991274
    12001275    // set output arguments
    1201     hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)inode_xp );
    1202     hal_remote_s64( XPTR( client_cxy , &desc->args[7] ) , (uint64_t)error );
     1276    hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)inode_xp );
     1277    hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
    12031278
    12041279#if DEBUG_RPC_VFS_INODE_CREATE
     
    16011676
    16021677/////////////////////////////////////////////////////////
    1603 void rpc_vfs_fs_get_dentry_client( cxy_t         cxy,
     1678void rpc_vfs_fs_new_dentry_client( cxy_t         cxy,
    16041679                                   vfs_inode_t * parent_inode,    // in
    16051680                                   char        * name,            // in
     
    16431718
    16441719//////////////////////////////////////////////
    1645 void rpc_vfs_fs_get_dentry_server( xptr_t xp )
     1720void rpc_vfs_fs_new_dentry_server( xptr_t xp )
    16461721{
    16471722#if DEBUG_RPC_VFS_FS_GET_DENTRY
     
    16741749
    16751750    // call the kernel function
    1676     error = vfs_fs_get_dentry( parent , name_copy , child_xp );
     1751    error = vfs_fs_new_dentry( parent , name_copy , child_xp );
    16771752
    16781753    // set output argument
     
    22452320
    22462321/////////////////////////////////////////////////////////////////////////////////////////
    2247 // [24]          undefined slot
    2248 /////////////////////////////////////////////////////////////////////////////////////////
      2322// [24]          Marshaling functions attached to RPC_MAPPER_SYNC
     2323/////////////////////////////////////////////////////////////////////////////////////////
     2324
     2325///////////////////////////////////////////////////
     2326void rpc_mapper_sync_client( cxy_t             cxy,
     2327                             struct mapper_s * mapper,
     2328                             error_t         * error )
     2329{
     2330#if DEBUG_RPC_MAPPER_SYNC
     2331thread_t * this = CURRENT_THREAD;
     2332uint32_t cycle = (uint32_t)hal_get_cycles();
     2333if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2334printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2335__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2336#endif
     2337
     2338    uint32_t responses = 1;
     2339
     2340    // initialise RPC descriptor header
     2341    rpc_desc_t  rpc;
     2342    rpc.index    = RPC_MAPPER_SYNC;
     2343    rpc.blocking = true;
     2344    rpc.rsp      = &responses;
     2345
     2346    // set input arguments in RPC descriptor
     2347    rpc.args[0] = (uint64_t)(intptr_t)mapper;
     2348
     2349    // register RPC request in remote RPC fifo
     2350    rpc_send( cxy , &rpc );
     2351
     2352    // get output values from RPC descriptor
     2353    *error   = (error_t)rpc.args[1];
     2354
     2355#if DEBUG_RPC_MAPPER_SYNC
     2356cycle = (uint32_t)hal_get_cycles();
     2357if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2358printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2359__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2360#endif
     2361}
     2362
     2363////////////////////////////////////////
     2364void rpc_mapper_sync_server( xptr_t xp )
     2365{
     2366#if DEBUG_RPC_MAPPER_SYNC
     2367thread_t * this = CURRENT_THREAD;
     2368uint32_t cycle = (uint32_t)hal_get_cycles();
     2369if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2370printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2371__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2372#endif
     2373
     2374    mapper_t * mapper;
     2375    error_t    error;
     2376
     2377    // get client cluster identifier and pointer on RPC descriptor
     2378    cxy_t        client_cxy  = GET_CXY( xp );
     2379    rpc_desc_t * desc        = GET_PTR( xp );
     2380
     2381    // get arguments from client RPC descriptor
     2382    mapper  = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     2383
     2384    // call local kernel function
     2385    error = mapper_sync( mapper );
     2386
     2387    // set output argument to client RPC descriptor
     2388    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     2389
     2390#if DEBUG_RPC_MAPPER_SYNC
     2391cycle = (uint32_t)hal_get_cycles();
     2392if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2393printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2394__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2395#endif
     2396}
    22492397
    22502398/////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/kern/rpc.h

    r619 r623  
    6868    RPC_THREAD_USER_CREATE        = 6,
    6969    RPC_THREAD_KERNEL_CREATE      = 7,
    70     RPC_UNDEFINED_8               = 8,
     70    RPC_VFS_FS_UPDATE_DENTRY      = 8,
    7171    RPC_PROCESS_SIGACTION         = 9,
    7272
     
    8686    RPC_KCM_ALLOC                 = 22,
    8787    RPC_KCM_FREE                  = 23,
    88     RPC_UNDEFINED_24              = 24,
     88    RPC_MAPPER_SYNC               = 24,
    8989    RPC_MAPPER_HANDLE_MISS        = 25,
    9090    RPC_VMM_DELETE_VSEG           = 26,
     
    305305
    306306/***********************************************************************************
    307  * [8] undefined slot
    308  **********************************************************************************/
    309 
    310 /***********************************************************************************
    311  * [9] The RPC_PROCESS_SIGACTION allows any client thread to request to any cluster
    312  * execute a given sigaction, defined by the <action_type> for a given process,
     307 * [8] The RPC_VFS_FS_UPDATE_DENTRY allows a client thread to request a remote
     308 * cluster to update the <size> field of a directory entry in the mapper of a
     309 * remote directory inode, identified by the <inode> local pointer.
     310 * The target entry name is identified by the <dentry> local pointer.
     311 ***********************************************************************************
     312 * @ cxy     : server cluster identifier.
     313 * @ inode   : [in] local pointer on remote directory inode.
     314 * @ dentry  : [in] local pointer on remote dentry.
     315 * @ size    : [in] new size value.
     316 * @ error   : [out] error status (0 if success).
     317 **********************************************************************************/
     318void rpc_vfs_fs_update_dentry_client( cxy_t                 cxy,
     319                                      struct vfs_inode_s  * inode,
     320                                      struct vfs_dentry_s * dentry,
     321                                      uint32_t              size,
     322                                      error_t             * error );
     323
     324void rpc_vfs_fs_update_dentry_server( xptr_t xp );
     325
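
As a usage note, a hedged sketch of the local/remote dispatch pattern a VFS caller might use with this new RPC; the helper name and its arguments are illustrative only, but vfs_fs_update_dentry() and rpc_vfs_fs_update_dentry_client() are the functions referenced by this changeset:

    // update the <size> field of a dentry whose directory inode is in cluster <inode_cxy>
    static error_t vfs_dentry_set_size( cxy_t          inode_cxy,     // hypothetical helper
                                        vfs_inode_t  * inode_ptr,
                                        vfs_dentry_t * dentry_ptr,
                                        uint32_t       new_size )
    {
        error_t error;

        if( inode_cxy == local_cxy )      // directory inode is local : direct call
        {
            error = vfs_fs_update_dentry( inode_ptr , dentry_ptr , new_size );
        }
        else                              // directory inode is remote : RPC
        {
            rpc_vfs_fs_update_dentry_client( inode_cxy , inode_ptr , dentry_ptr ,
                                             new_size , &error );
        }
        return error;
    }
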
     326/***********************************************************************************
     327 * [9] The RPC_PROCESS_SIGACTION allows a client thread to request a remote cluster
     328 * to execute a given sigaction, defined by the <action_type> for a given process,
    313329 * identified by the <pid> argument.
    314330 ***********************************************************************************
     
    340356void rpc_vfs_inode_create_client( cxy_t      cxy,
    341357                                  uint32_t   fs_type,
    342                                   uint32_t   inode_type,
    343358                                  uint32_t   attr,   
    344359                                  uint32_t   rights, 
     
    423438
    424439/***********************************************************************************
    425  * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_get_dentry()
     440 * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_new_dentry()
    426441 * function in a remote cluster containing a parent inode directory to scan the
    427442 * associated mapper, find a directory entry identified by its name, and update
     
    434449 * @ error          : [out] error status (0 if success).
    435450 **********************************************************************************/
    436 void rpc_vfs_fs_get_dentry_client( cxy_t                cxy,
     451void rpc_vfs_fs_new_dentry_client( cxy_t                cxy,
    437452                                   struct vfs_inode_s * parent_inode,
    438453                                   char               * name,
     
    440455                                   error_t            * error );
    441456
    442 void rpc_vfs_fs_get_dentry_server( xptr_t xp );
     457void rpc_vfs_fs_new_dentry_server( xptr_t xp );
    443458
    444459/***********************************************************************************
     
    564579
    565580/***********************************************************************************
    566  * [24] undefined slot
    567  **********************************************************************************/
      581 * [24] The RPC_MAPPER_SYNC allows a client thread to synchronize to disk
      582 * all dirty pages of a remote mapper.
     583 ***********************************************************************************
     584 * @ cxy       : server cluster identifier.
     585 * @ mapper    : [in] local pointer on mapper in server cluster.
     586 * @ error       : [out] error status (0 if success).
     587 **********************************************************************************/
     588void rpc_mapper_sync_client( cxy_t             cxy,
     589                             struct mapper_s * mapper,
     590                             error_t         * error );
     591
     592void rpc_mapper_sync_server( xptr_t xp );
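
Similarly, a hedged sketch of how a caller might choose between the local mapper_sync() call and this RPC, depending on the mapper's cluster (the helper name is illustrative only):

    static error_t mapper_sync_dispatch( cxy_t      mapper_cxy,    // hypothetical helper
                                         mapper_t * mapper_ptr )
    {
        error_t error;

        if( mapper_cxy == local_cxy )     // mapper is local : direct call
        {
            error = mapper_sync( mapper_ptr );
        }
        else                              // mapper is remote : RPC
        {
            rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );
        }
        return error;
    }
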
    568593
    569594/***********************************************************************************
  • trunk/kernel/kern/thread.c

    r620 r623  
    13821382                               const char * string )
    13831383{
     1384
    13841385    cxy_t      thread_cxy = GET_CXY( thread_xp );
    13851386    thread_t * thread_ptr = GET_PTR( thread_xp );
    13861387
    1387 #if( DEBUG_BUSYLOCK )
    1388 
    1389     xptr_t    iter_xp;
    1390 
    1391     // get relevant info from target trhead descriptor
     1388#if DEBUG_BUSYLOCK
     1389
     1390    xptr_t     iter_xp;
     1391
     1392    // get relevant info from target thread descriptor
    13921393    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
    13931394    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    14291430    remote_busylock_release( txt0_lock_xp );
    14301431
     1432#else
     1433
     1434printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
     1435__FUNCTION__, string, thread_cxy, thread_ptr );
     1436
     1437#endif
     1438
    14311439    return;
    14321440
    1433 #endif
    1434 
    1435     // display a warning
    1436     printk("\n[WARNING] set DEBUG_BUSYLOCK in kernel_config.h to display busylocks" );
    1437 
    14381441}  // end thread_display_busylock()
    14391442
  • trunk/kernel/kernel_config.h

    r620 r623  
    8181#define DEBUG_FATFS_FREE_CLUSTERS         0
    8282#define DEBUG_FATFS_GET_CLUSTER           0
    83 #define DEBUG_FATFS_GET_DENTRY            0
    8483#define DEBUG_FATFS_GET_USER_DIR          0
    8584#define DEBUG_FATFS_MOVE_PAGE             0
    86 #define DEBUG_FATFS_RELEASE_INODE         0
     85#define DEBUG_FATFS_NEW_DENTRY            0
     86#define DEBUG_FATFS_RELEASE_INODE         1
    8787#define DEBUG_FATFS_REMOVE_DENTRY         0
    8888#define DEBUG_FATFS_SYNC_FAT              0
    8989#define DEBUG_FATFS_SYNC_FSINFO           0
    9090#define DEBUG_FATFS_SYNC_INODE            0
     91#define DEBUG_FATFS_UPDATE_DENTRY         0
    9192
    9293#define DEBUG_HAL_GPT_SET_PTE             0
     
    112113#define DEBUG_MAPPER_MOVE_USER            0
    113114#define DEBUG_MAPPER_MOVE_KERNEL          0
     115#define DEBUG_MAPPER_SYNC                 0
    114116
    115117#define DEBUG_MUTEX                       0
     
    130132#define DEBUG_PROCESS_ZERO_CREATE         0
    131133
    132 #define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 is undefined)
     134#define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 : undefined / 1000 : all types)
    133135
    134136#define DEBUG_RPC_CLIENT_GENERIC          0
     
    157159#define DEBUG_RPC_VMM_DELETE_VSEG         0
    158160
    159 #define DEBUG_RWLOCK_TYPE                 0    // lock type (0 is undefined)
     161#define DEBUG_RWLOCK_TYPE                 0    // lock type (0 : undefined / 1000 : all types)
    160162
    161163#define DEBUG_SCHED_HANDLE_SIGNALS        2
     
    234236#define DEBUG_VFS_OPENDIR                 0
    235237#define DEBUG_VFS_STAT                    0
    236 #define DEBUG_VFS_UNLINK                  0
     238#define DEBUG_VFS_UNLINK                  1
    237239
    238240#define DEBUG_VMM_CREATE_VSEG             0
     
    247249#define DEBUG_VMM_MMAP_ALLOC              0
    248250#define DEBUG_VMM_PAGE_ALLOCATE           0
     251#define DEBUG_VMM_RESIZE_VSEG             0
    249252#define DEBUG_VMM_SET_COW                 0
    250253#define DEBUG_VMM_UPDATE_PTE              0
  • trunk/kernel/libk/busylock.h

    r563 r623  
    3434 * a shared object located in a given cluster, made by thread(s) running in same cluster.
    3535 * It uses a busy waiting policy when the lock is taken by another thread, and should
    36  * be used to execute very short actions, such as basic allocators, or to protect
    37  * higher level synchronisation objects, such as queuelock or rwlock.
    38  * WARNING: a thread cannot yield when it is owning a busylock (local or remote).
      36 * be used only for very short actions, such as accessing basic allocators, or
      37 * implementing higher level synchronisation objects (barriers, queuelocks, or rwlocks).
      38 * WARNING: a thread cannot yield while it holds a busylock.
    3939 *
    4040 * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
  • trunk/kernel/libk/grdxt.h

    r610 r623  
    132132 * @ start_key  : key starting value for the scan.
    133133 * @ found_key  : [out] buffer for found key value.
    134  * return pointer on first valid item if found / return NULL if not found.
     134 * @ return pointer on first valid item if found / return NULL if not found.
    135135 ******************************************************************************************/
    136136void * grdxt_get_first( grdxt_t  * rt,
  • trunk/kernel/libk/queuelock.c

    r610 r623  
    6666    busylock_acquire( &lock->lock );
    6767
     68#if DEBUG_QUEUELOCK_TYPE
     69uint32_t   lock_type = lock->lock.type;
     70#endif
     71
    6872    // block and deschedule if lock already taken
    6973    while( lock->taken )
     
    7175
    7276#if DEBUG_QUEUELOCK_TYPE
    73 uint32_t   lock_type = lock->lock.type;
    74 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     77if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    7578printk("\n[%s ] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    7679__FUNCTION__, this->process->pid, this->trdid,
     
    97100
    98101#if DEBUG_QUEUELOCK_TYPE
    99 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     102if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    100103printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    101104__FUNCTION__, this->process->pid, this->trdid,
     
    123126uint32_t   lock_type = lock->lock.type;
    124127thread_t * this      = CURRENT_THREAD;
    125 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     128if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    126129printk("\n[%s] thread[%x,%x] RELEASE q_lock %s [%x,%x]\n",
    127130__FUNCTION__, this->process->pid, this->trdid,
     
    139142
    140143#if DEBUG_QUEUELOCK_TYPE
    141 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     144if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    142145printk("\n[%s] thread[%x,%x] UNBLOCK thread [%x,%x] / q_lock %s [%x,%x]\n",
    143146__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/remote_barrier.c

    r619 r623  
    245245}  // end generic_barrier_wait()
    246246
    247 
     247/////////////////////////////////////////////////////
     248void generic_barrier_display( xptr_t gen_barrier_xp )
     249{
     250    // get cluster and local pointer
     251    generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp );
     252    cxy_t               gen_barrier_cxy = GET_CXY( gen_barrier_xp );
     253
     254    // get barrier type and extend pointer
     255    bool_t  is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) );
     256    void  * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) );
     257
      259    // build extended pointer on the implementation-specific barrier descriptor
     259    xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend );
     260
     261    // display barrier state
     262    if( is_dqt ) dqt_barrier_display( barrier_xp );
     263    else         simple_barrier_display( barrier_xp );
     264}
    248265
    249266
     
    454471
    455472}  // end simple_barrier_wait()
     473
     474/////////////////////////////////////////////////
     475void simple_barrier_display( xptr_t  barrier_xp )
     476{
     477    // get cluster and local pointer on simple barrier
     478    simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
     479    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
     480
     481    // get barrier global parameters
     482    uint32_t current  = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->current ) );
     483    uint32_t arity    = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->arity   ) );
     484
     485    printk("\n***** simple barrier : %d arrived threads on %d *****\n",
     486    current, arity );
     487
     488}   // end simple_barrier_display()
     489
     490
    456491
    457492
     
    493528
    494529// check x_size and y_size arguments
    495 assert( (z <= 16) , "DQT dqth larger than (16*16)\n");
     530assert( (z <= 16) , "DQT mesh size larger than (16*16)\n");
    496531
    497532// check RPC descriptor size
     
    9731008}  // end dqt_barrier_wait()
    9741009
    975 
    976 ////////////////////////////////////////////////////////////////////////////////////////////
    977 //          DQT static functions
    978 ////////////////////////////////////////////////////////////////////////////////////////////
    979 
    980 
    981 //////////////////////////////////////////////////////////////////////////////////////////
    982 // This recursive function decrements the distributed "count" variables,
    983 // traversing the DQT from bottom to root.
    984 // The last arrived thread reset the local node before returning.
    985 //////////////////////////////////////////////////////////////////////////////////////////
    986 static void dqt_barrier_increment( xptr_t  node_xp )
    987 {
    988     uint32_t   expected;
    989     uint32_t   sense;
    990     uint32_t   arity;
    991 
    992     thread_t * this = CURRENT_THREAD;
    993 
    994     // get node cluster and local pointer
    995     dqt_node_t * node_ptr = GET_PTR( node_xp );
    996     cxy_t        node_cxy = GET_CXY( node_xp );
    997 
    998     // build relevant extended pointers
    999     xptr_t  arity_xp   = XPTR( node_cxy , &node_ptr->arity );
    1000     xptr_t  sense_xp   = XPTR( node_cxy , &node_ptr->sense );
    1001     xptr_t  current_xp = XPTR( node_cxy , &node_ptr->current );
    1002     xptr_t  lock_xp    = XPTR( node_cxy , &node_ptr->lock );
    1003     xptr_t  root_xp    = XPTR( node_cxy , &node_ptr->root );
    1004 
    1005 #if DEBUG_BARRIER_WAIT
    1006 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1007 uint32_t   level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) );
    1008 if( cycle > DEBUG_BARRIER_WAIT )
    1009 printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n",
    1010 __FUNCTION__ , this->process->pid, this->trdid,
    1011 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1012 #endif
    1013 
    1014     // get extended pointer on parent node
    1015     xptr_t  parent_xp  = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) );
    1016 
    1017     // take busylock
    1018     remote_busylock_acquire( lock_xp );
    1019    
    1020     // get sense and arity values from barrier descriptor
    1021     sense = hal_remote_l32( sense_xp );
    1022     arity = hal_remote_l32( arity_xp );
    1023 
    1024     // compute expected value
    1025     expected = (sense == 0) ? 1 : 0;
    1026 
    1027     // increment current number of arrived threads / get value before increment
    1028     uint32_t current = hal_remote_atomic_add( current_xp , 1 );
    1029 
    1030     // last arrived thread reset the local node, makes the recursive call
    1031     // on parent node, and reactivates all waiting thread when returning.
    1032     // other threads block, register in queue, and deschedule.
    1033 
    1034     if ( current == (arity - 1) )                        // last thread 
    1035     {
    1036 
    1037 #if DEBUG_BARRIER_WAIT
    1038 if( cycle > DEBUG_BARRIER_WAIT )
    1039 printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n",
    1040 __FUNCTION__ , this->process->pid, this->trdid,
    1041 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1042 #endif
    1043         // reset the current node
    1044         hal_remote_s32( sense_xp   , expected );
    1045         hal_remote_s32( current_xp , 0 );
    1046 
    1047         // release busylock protecting the current node
    1048         remote_busylock_release( lock_xp );
    1049 
    1050         // recursive call on parent node when current node is not the root
    1051         if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp );
    1052 
    1053         // unblock all waiting threads on this node
    1054         while( xlist_is_empty( root_xp ) == false )
    1055         {
    1056             // get pointers on first waiting thread
    1057             xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
    1058             cxy_t      thread_cxy = GET_CXY( thread_xp );
    1059             thread_t * thread_ptr = GET_PTR( thread_xp );
    1060 
    1061 #if (DEBUG_BARRIER_WAIT & 1)
    1062 trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
    1063 process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
    1064 pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
    1065 if( cycle > DEBUG_BARRIER_WAIT )
    1066 printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n",
    1067 __FUNCTION__, this->process->pid, this->trdid, pid, trdid );
    1068 #endif
    1069             // remove waiting thread from queue
    1070             xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
    1071 
    1072             // unblock waiting thread
    1073             thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    1074         }
    1075     }
    1076     else                                               // not the last thread
    1077     {
    1078         // get extended pointer on xlist entry from thread
    1079         xptr_t  entry_xp = XPTR( local_cxy , &this->wait_list );
    1080        
    1081         // register calling thread in barrier waiting queue
    1082         xlist_add_last( root_xp , entry_xp );
    1083 
    1084         // block calling thread
    1085         thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
    1086 
    1087         // release busylock protecting the remote_barrier
    1088         remote_busylock_release( lock_xp );
    1089 
    1090 #if DEBUG_BARRIER_WAIT
    1091 if( cycle > DEBUG_BARRIER_WAIT )
    1092 printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n",
    1093 __FUNCTION__ , this->process->pid, this->trdid,
    1094 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1095 #endif
    1096         // deschedule
    1097         sched_yield("blocked on barrier");
    1098     }
    1099 
    1100     return;
    1101 
    1102 } // end dqt_barrier_decrement()
    1103 
    1104 #if DEBUG_BARRIER_CREATE
    1105 
    1106 ////////////////////////////////////////////////////////////////////////////////////////////
    1107 // This debug function displays all DQT nodes in all clusters.
    1108 ////////////////////////////////////////////////////////////////////////////////////////////
    1109 // @ barrier_xp   : extended pointer on DQT barrier descriptor.
    1110 ////////////////////////////////////////////////////////////////////////////////////////////
    1111 static void dqt_barrier_display( xptr_t  barrier_xp )
     1010//////////////////////////////////////////////
     1011void dqt_barrier_display( xptr_t  barrier_xp )
    11121012{
    11131013    // get cluster and local pointer on DQT barrier
     
    11471047                     uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level       ));
    11481048                     uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity       ));
     1049                     uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current     ));
    11491050                     xptr_t   pa_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->parent_xp   ));
    11501051                     xptr_t   c0_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[0] ));
     
    11531054                     xptr_t   c3_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[3] ));
    11541055
    1155                      printk("   . level %d : (%x,%x) / arity %d / P(%x,%x) / C0(%x,%x)"
     1056                     printk("   . level %d : (%x,%x) / %d on %d / P(%x,%x) / C0(%x,%x)"
    11561057                            " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n",
    1157                      level, node_cxy, node_ptr, arity,
     1058                     level, node_cxy, node_ptr, count, arity,
    11581059                     GET_CXY(pa_xp), GET_PTR(pa_xp),
    11591060                     GET_CXY(c0_xp), GET_PTR(c0_xp),
     
    11671068}   // end dqt_barrier_display()
    11681069
    1169 #endif
     1070
     1071//////////////////////////////////////////////////////////////////////////////////////////
     1072// This static (recursive) function is called by the dqt_barrier_wait() function.
      1073// It traverses the DQT from bottom to root, and increments the "current" variables.
      1074// For each traversed node, the calling thread blocks and deschedules if it is not the
      1075// last expected thread. The last arrived thread resets the local node before returning.
     1076//////////////////////////////////////////////////////////////////////////////////////////
     1077static void dqt_barrier_increment( xptr_t  node_xp )
     1078{
     1079    uint32_t   expected;
     1080    uint32_t   sense;
     1081    uint32_t   arity;
     1082
     1083    thread_t * this = CURRENT_THREAD;
     1084
     1085    // get node cluster and local pointer
     1086    dqt_node_t * node_ptr = GET_PTR( node_xp );
     1087    cxy_t        node_cxy = GET_CXY( node_xp );
     1088
     1089    // build relevant extended pointers
     1090    xptr_t  arity_xp   = XPTR( node_cxy , &node_ptr->arity );
     1091    xptr_t  sense_xp   = XPTR( node_cxy , &node_ptr->sense );
     1092    xptr_t  current_xp = XPTR( node_cxy , &node_ptr->current );
     1093    xptr_t  lock_xp    = XPTR( node_cxy , &node_ptr->lock );
     1094    xptr_t  root_xp    = XPTR( node_cxy , &node_ptr->root );
     1095
     1096#if DEBUG_BARRIER_WAIT
     1097uint32_t   cycle = (uint32_t)hal_get_cycles();
     1098uint32_t   level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) );
     1099if( cycle > DEBUG_BARRIER_WAIT )
     1100printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n",
     1101__FUNCTION__ , this->process->pid, this->trdid,
     1102HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1103#endif
     1104
     1105    // get extended pointer on parent node
     1106    xptr_t  parent_xp  = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) );
     1107
     1108    // take busylock
     1109    remote_busylock_acquire( lock_xp );
     1110   
     1111    // get sense and arity values from barrier descriptor
     1112    sense = hal_remote_l32( sense_xp );
     1113    arity = hal_remote_l32( arity_xp );
     1114
     1115    // compute expected value
     1116    expected = (sense == 0) ? 1 : 0;
     1117
     1118    // increment current number of arrived threads / get value before increment
     1119    uint32_t current = hal_remote_atomic_add( current_xp , 1 );
     1120
      1121    // the last arrived thread resets the local node, makes the recursive call
      1122    // on the parent node, and reactivates all waiting threads when returning.
     1123    // other threads block, register in queue, and deschedule.
     1124
     1125    if ( current == (arity - 1) )                        // last thread 
     1126    {
     1127
     1128#if DEBUG_BARRIER_WAIT
     1129if( cycle > DEBUG_BARRIER_WAIT )
     1130printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n",
     1131__FUNCTION__ , this->process->pid, this->trdid,
     1132HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1133#endif
     1134        // reset the current node
     1135        hal_remote_s32( sense_xp   , expected );
     1136        hal_remote_s32( current_xp , 0 );
     1137
     1138        // release busylock protecting the current node
     1139        remote_busylock_release( lock_xp );
     1140
     1141        // recursive call on parent node when current node is not the root
     1142        if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp );
     1143
     1144        // unblock all waiting threads on this node
     1145        while( xlist_is_empty( root_xp ) == false )
     1146        {
     1147            // get pointers on first waiting thread
     1148            xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
     1149            cxy_t      thread_cxy = GET_CXY( thread_xp );
     1150            thread_t * thread_ptr = GET_PTR( thread_xp );
     1151
     1152#if (DEBUG_BARRIER_WAIT & 1)
     1153trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     1154process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
     1155pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
     1156if( cycle > DEBUG_BARRIER_WAIT )
     1157printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n",
     1158__FUNCTION__, this->process->pid, this->trdid, pid, trdid );
     1159#endif
     1160            // remove waiting thread from queue
     1161            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
     1162
     1163            // unblock waiting thread
     1164            thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
     1165        }
     1166    }
     1167    else                                               // not the last thread
     1168    {
     1169        // get extended pointer on xlist entry from thread
     1170        xptr_t  entry_xp = XPTR( local_cxy , &this->wait_list );
     1171       
     1172        // register calling thread in barrier waiting queue
     1173        xlist_add_last( root_xp , entry_xp );
     1174
     1175        // block calling thread
     1176        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
     1177
     1178        // release busylock protecting the remote_barrier
     1179        remote_busylock_release( lock_xp );
     1180
     1181#if DEBUG_BARRIER_WAIT
     1182if( cycle > DEBUG_BARRIER_WAIT )
     1183printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n",
     1184__FUNCTION__ , this->process->pid, this->trdid,
     1185HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1186#endif
     1187        // deschedule
     1188        sched_yield("blocked on barrier");
     1189    }
     1190
     1191    return;
     1192
      1193} // end dqt_barrier_increment()
     1194
     1195
  • trunk/kernel/libk/remote_barrier.h

    r619 r623  
    4242 * used by the kernel. ALMOS-MKH uses only the barrier virtual address as an identifier.
    4343 * For each user barrier, ALMOS-MKH creates a kernel structure, dynamically allocated
    44  * by the "generic_barrier_create()" function, destroyed by the "remote_barrier_destroy()"
    45  * function, and used by the "generic_barrier_wait()" function.
     44 * by the generic_barrier_create() function, destroyed by the generic_barrier_destroy()
     45 * function, and used by the generic_barrier_wait() function.
    4646 *
    4747 * Implementation note:
     
    5858 *    (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster, and called
    5959 *    DQT : Distributed Quad Tree. This DQT implementation supposes a regular architecture,
     6061 *    and a strong constraint on the threads placement: exactly "nthreads" threads per
    6162 *    cluster in the (x_size * y_size) mesh.
     
    141142
    142143
    143 
     144/*****************************************************************************************
     145 * This debug function uses remote accesses to display the current state of a generic
     146 * barrier identified by the <gen_barrier_xp> argument.
      147 * It calls the relevant display function, depending on the barrier type (simple or DQT).
     148 * It can be called by a thread running in any cluster.
     149 *****************************************************************************************
     150 * @ barrier_xp   : extended pointer on generic barrier descriptor.
     151 ****************************************************************************************/
     152
     153void generic_barrier_display( xptr_t gen_barrier_xp );
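
For example, a debug hook could dump the barrier state from any cluster as sketched below; the wrapper function and its arguments are illustrative, while generic_barrier_display(), generic_barrier_t and XPTR() come from the kernel sources:

    static void debug_show_barrier( cxy_t               barrier_cxy,   // hypothetical hook
                                    generic_barrier_t * barrier_ptr )
    {
        // build an extended pointer on the generic barrier descriptor and
        // display its state (simple or DQT) on the kernel TXT0 terminal
        generic_barrier_display( XPTR( barrier_cxy , barrier_ptr ) );
    }
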
    144154
    145155
     
    192202void simple_barrier_wait( xptr_t   barrier_xp );
    193203
     204/*****************************************************************************************
     205 * This debug function uses remote accesses to display the current state of a simple
     206 * barrier identified by the <barrier_xp> argument.
     207 * It can be called by a thread running in any cluster.
     208 *****************************************************************************************
     209 * @ barrier_xp   : extended pointer on simple barrier descriptor.
     210 ****************************************************************************************/
     211void simple_barrier_display( xptr_t barrier_xp );
    194212
    195213
     
    281299void dqt_barrier_wait( xptr_t   barrier_xp );
    282300
    283 
     301/*****************************************************************************************
     302 * This debug function uses remote accesses to display the current state of all
      303 * distributed nodes in a DQT barrier identified by the <barrier_xp> argument.
     304 * It can be called by a thread running in any cluster.
     305 *****************************************************************************************
     306 * @ barrier_xp   : extended pointer on DQT barrier descriptor.
     307 ****************************************************************************************/
     308void dqt_barrier_display( xptr_t barrier_xp );
    284309
    285310#endif  /* _REMOTE_BARRIER_H_ */
  • trunk/kernel/libk/remote_queuelock.c

    r610 r623  
    9191
    9292#if DEBUG_QUEUELOCK_TYPE
    93 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     93if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    9494printk("\n[%s] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    9595__FUNCTION__, this->process->pid, this->trdid,
     
    117117
    118118#if DEBUG_QUEUELOCK_TYPE
    119 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     119if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    120120printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    121121__FUNCTION__, this->process->pid, this->trdid,
     
    152152thread_t * this      = CURRENT_THREAD;
    153153uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    154 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     154if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    155155printk("\n[%s] thread[%x,%x] RELEASE q_lock %s (%x,%x)\n",
    156156__FUNCTION__, this->process->pid, this->trdid,
     
    171171
    172172#if DEBUG_QUEUELOCK_TYPE
    173 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     173if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    174174{
    175175    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
  • trunk/kernel/libk/remote_rwlock.c

    r610 r623  
    5555#if DEBUG_RWLOCK_TYPE
    5656thread_t * this = CURRENT_THREAD;
    57 if( type == DEBUG_RWLOCK_TYPE )
     57if( DEBUG_RWLOCK_TYPE == type )
    5858printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    5959__FUNCTION__, this->process->pid, this->trdid,
     
    9393
    9494#if DEBUG_RWLOCK_TYPE
    95 if( lock_type == DEBUG_RWLOCK_TYPE )
     95if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    9696printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    9797__FUNCTION__, this->process->pid, this->trdid,
     
    124124
    125125#if DEBUG_RWLOCK_TYPE
    126 if( lock_type == DEBUG_RWLOCK_TYPE )
     126if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    127127printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n",
    128128__FUNCTION__, this->process->pid, this->trdid,
     
    166166
    167167#if DEBUG_RWLOCK_TYPE
    168 if( lock_type == DEBUG_RWLOCK_TYPE )
     168if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    169169printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    170170__FUNCTION__, this->process->pid, this->trdid,
     
    196196
    197197#if DEBUG_RWLOCK_TYPE
    198 if( lock_type == DEBUG_RWLOCK_TYPE )
     198if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    199199printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    200200__FUNCTION__, this->process->pid, this->trdid,
     
    235235uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    236236xptr_t     taken_xp  = XPTR( lock_cxy , &lock_ptr->taken );
    237 if( lock_type == DEBUG_RWLOCK_TYPE )
     237if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    238238printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    239239__FUNCTION__, this->process->pid, this->trdid,
     
    258258
    259259#if DEBUG_RWLOCK_TYPE
    260 if( lock_type == DEBUG_RWLOCK_TYPE )
     260if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    261261{
    262262    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    289289
    290290#if DEBUG_RWLOCK_TYPE
    291 if( lock_type == DEBUG_RWLOCK_TYPE )
     291if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    292292{
    293293    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    334334uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    335335xptr_t     count_xp  = XPTR( lock_cxy , &lock_ptr->count );
    336 if( lock_type == DEBUG_RWLOCK_TYPE )
     336if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    337337printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    338338__FUNCTION__, this->process->pid, this->trdid,
     
    356356
    357357#if DEBUG_RWLOCK_TYPE
    358 if( lock_type == DEBUG_RWLOCK_TYPE )
     358if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    359359{
    360360    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    386386
    387387#if DEBUG_RWLOCK_TYPE
    388 if( lock_type == DEBUG_RWLOCK_TYPE )
     388if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    389389{
    390390    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
  • trunk/kernel/libk/rwlock.c

    r610 r623  
    7171    busylock_acquire( &lock->lock );
    7272
     73#if DEBUG_RWLOCK_TYPE
     74uint32_t lock_type = lock->lock.type;
     75#endif
     76
    7377    // block and deschedule if lock already taken
    7478    while( lock->taken )
     
    7680
    7781#if DEBUG_RWLOCK_TYPE
    78 uint32_t lock_type = lock->lock.type;
    79 if( DEBUG_RWLOCK_TYPE == lock_type )
     82if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    8083printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    8184__FUNCTION__, this->process->pid, this->trdid,
     
    102105
    103106#if DEBUG_RWLOCK_TYPE
    104 if( DEBUG_RWLOCK_TYPE == lock_type )
     107if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    105108printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    106109__FUNCTION__, this->process->pid, this->trdid,
     
    124127    busylock_acquire( &lock->lock );
    125128
     129#if DEBUG_RWLOCK_TYPE
     130uint32_t lock_type = lock->lock.type;
     131#endif
     132
    126133    // block and deschedule if lock already taken or existing read access
    127134    while( lock->taken || lock->count )
     
    129136
    130137#if DEBUG_RWLOCK_TYPE
    131 uint32_t lock_type = lock->lock.type;
    132 if( DEBUG_RWLOCK_TYPE == lock_type )
     138if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    133139printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    134140__FUNCTION__, this->process->pid, this->trdid,
     
    155161
    156162#if DEBUG_RWLOCK_TYPE
    157 if( DEBUG_RWLOCK_TYPE == lock_type )
     163if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    158164printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    159165__FUNCTION__, this->process->pid, this->trdid,
     
    181187thread_t * this = CURRENT_THREAD;
    182188uint32_t lock_type = lock->lock.type;
    183 if( DEBUG_RWLOCK_TYPE == lock_type )
     189if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    184190printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    185191__FUNCTION__, this->process->pid, this->trdid,
     
    195201
    196202#if DEBUG_RWLOCK_TYPE
    197 if( DEBUG_RWLOCK_TYPE == lock_type )
     203if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    198204printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    199205__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    217223
    218224#if DEBUG_RWLOCK_TYPE
    219 if( DEBUG_RWLOCK_TYPE == lock_type )
     225if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    220226printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    221227__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    251257thread_t * this = CURRENT_THREAD;
    252258uint32_t lock_type = lock->lock.type;
    253 if( DEBUG_RWLOCK_TYPE == lock_type )
     259if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    254260printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    255261__FUNCTION__, this->process->pid, this->trdid,
     
    264270
    265271#if DEBUG_RWLOCK_TYPE
    266 if( DEBUG_RWLOCK_TYPE == lock_type )
     272if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    267273printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    268274__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    285291
    286292#if DEBUG_RWLOCK_TYPE
    287 if( DEBUG_RWLOCK_TYPE == lock_type )
     293if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    288294printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    289295__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/user_dir.h

    r614 r623  
    8686 * - the allocation of one or several physical pages in reference cluster to store
    8787 *   all directory entries in an array of 64 bytes dirent structures,
    88  * - the initialisation of this array from informations found in the Inode Tree.
      88 * - the initialisation of this array from information found in the directory mapper.
    8989 * - the creation of an ANON vseg containing this dirent array in reference process VMM,
    9090 *   and the mapping of the relevant physical pages in this vseg.
  • trunk/kernel/mm/mapper.c

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    261261vfs_inode_t * inode = mapper->inode;
    262262vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    263 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264 // if( (page_id == 1) && (cycle > 10000000) )
     263if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    265264printk("\n[%s] enter for page %d in <%s> / cycle %d",
    266265__FUNCTION__, page_id, name, cycle );
     
    322321#if DEBUG_MAPPER_HANDLE_MISS
    323322cycle = (uint32_t)hal_get_cycles();
    324 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    325 // if( (page_id == 1) && (cycle > 10000000) )
     323if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    326324printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
    327325__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
     
    442440            ppm_page_do_dirty( page_xp );
    443441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
     442
     443putb(" in mapper_move_user()" , map_ptr , page_count );
     444
    444445        }
    445446
     
    645646
    646647}  // end mapper_remote_set_32()
     648
     649/////////////////////////////////////////
     650error_t mapper_sync( mapper_t *  mapper )
     651{
     652    page_t   * page;                // local pointer on current page descriptor
     653    xptr_t     page_xp;             // extended pointer on current page descriptor
     654    grdxt_t  * rt;                  // pointer on radix_tree descriptor
     655    uint32_t   start_key;           // start page index in mapper
     656    uint32_t   found_key;           // current page index in mapper
     657    error_t    error;
     658
     659#if DEBUG_MAPPER_SYNC
     660thread_t * this  = CURRENT_THREAD;
     661uint32_t   cycle = (uint32_t)hal_get_cycles();
     662char       name[CONFIG_VFS_MAX_NAME_LENGTH];
     663vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
     664#endif
     665
     666    // get pointer on radix tree
     667    rt        = &mapper->rt;
     668
     669    // initialise loop variable
     670    start_key = 0;
     671
     672    // scan radix-tree until last page found
     673    while( 1 )
     674    {
     675        // get page descriptor from radix tree
     676        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
     677         
     678        if( page == NULL ) break;
     679
     680assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
     681assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
     682
     683        // build extended pointer on page descriptor
     684        page_xp = XPTR( local_cxy , page );
     685
     686        // synchronize page if dirty
     687        if( (page->flags & PG_DIRTY) != 0 )
     688        {
     689
     690#if DEBUG_MAPPER_SYNC
     691if( cycle > DEBUG_MAPPER_SYNC )
      692printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to device\n",
     693__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     694#endif
     695            // copy page to file system
     696            error = vfs_fs_move_page( page_xp , IOC_WRITE );
     697
     698            if( error )
     699            {
      700                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
     701                __FUNCTION__, page->index );
     702                return -1;
     703            }
     704
     705            // remove page from PPM dirty list
     706            ppm_page_undo_dirty( page_xp );
     707        }
     708        else
     709        {
     710
     711#if DEBUG_MAPPER_SYNC
     712if( cycle > DEBUG_MAPPER_SYNC )
     713printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
     714__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     715#endif
     716        }
     717
     718        // update loop variable
     719        start_key = page->index + 1;
     720    }  // end while
     721
     722    return 0;
     723
     724}  // end mapper_sync()
    647725
    648726//////////////////////////////////////////////////
  • trunk/kernel/mm/mapper.h

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    4848 *   "readers", and only one "writer".
    4949 * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
    50  *   descriptor pointer from the page index in file, can be remote (running in any cluster).
      50 *   descriptor pointer from the page index in the file, can be running in any cluster.
    5151 * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss
    5252 *   must be local (running in the mapper cluster).
    53  * - The vfs_mapper_move_page() function access the file system to handle a mapper miss,
      53 * - The vfs_fs_move_page() function accesses the file system to handle a mapper miss,
    5454 *   or update a dirty page on device.
     5555 * - The vfs_mapper_load_all() function is used to load all pages of a directory
     
    6363 *
    6464 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *        and mapper.h file should be trandfered to the vfs directory.
      65 *        and mapper.h files should be transferred to the fs directory.
    6666 ******************************************************************************************/
    6767
     
    230230
    231231/*******************************************************************************************
      232 * This function scans all pages present in the mapper identified by the <mapper> argument,
      233 * and synchronizes all pages marked as dirty on disk.
      234 * These pages are unmarked and removed from the local PPM dirty_list.
      235 * This function must be called by a local thread running in the same cluster as the mapper.
     236 * A remote thread must call the RPC_MAPPER_SYNC function.
     237 *******************************************************************************************
     238 * @ mapper     : [in]  local pointer on local mapper.
      239 * @ returns 0 if success / -1 if error.
     240 ******************************************************************************************/
     241error_t mapper_sync( mapper_t *  mapper );
     242
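The comment above fixes the calling convention: a thread running in the mapper cluster calls mapper_sync() directly, while a remote thread must go through the RPC_MAPPER_SYNC service. A hedged caller sketch, assuming the caller holds an extended pointer mapper_xp on the target mapper; the rpc_mapper_sync_client() name and arguments are an assumption derived from the RPC_MAPPER_SYNC note, not a verified prototype:

    // flush all dirty pages of the target mapper to the file system
    error_t    error;
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );

    if( mapper_cxy == local_cxy )       // local mapper => direct call
    {
        error = mapper_sync( mapper_ptr );
    }
    else                                // remote mapper => assumed RPC wrapper
    {
        rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );
    }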
     243/*******************************************************************************************
    232244 * This debug function displays the content of a given page of a given mapper.
    233245 * - the mapper is identified by the <mapper_xp> argument.
  • trunk/kernel/mm/page.h

    r612 r623  
    4141#define PG_INIT             0x0001     // page descriptor has been initialised
    4242#define PG_RESERVED         0x0002     // cannot be allocated by PPM
    43 #define PG_FREE             0x0004     // page can be allocated by PPM
     43#define PG_FREE             0x0004     // page not yet allocated by PPM
    4444#define PG_DIRTY            0x0040     // page has been written
    4545#define PG_COW          0x0080     // page is copy-on-write
  • trunk/kernel/mm/ppm.h

    r611 r623  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018)
     5 *          Alain Greiner    (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3737 * This structure defines the Physical Pages Manager in a cluster.
    3838 * In each cluster, the physical memory bank starts at local physical address 0 and
    39  * contains an integer number of pages, defined by the <pages_nr> field in the
     39 * contains an integer number of small pages, defined by the <pages_nr> field in the
    4040 * boot_info structure. It is split in three parts:
    4141 *
    4242 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
    43  *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
    44  *   boot_info structure.
    45  * - the "pages_tbl" section contains the physical page descriptors array. It starts
    46  *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
     43 *   It starts at local PPN = 0 and the size is defined by the <pages_offset> field
     44 *   in the boot_info structure.
     45 * - the local "pages_tbl" section contains the physical page descriptors array.
     46 *   It starts at local PPN = pages_offset, and it contains one entry per small page.
    4747 *   It is created and initialized by the hal_ppm_create() function.
     4848 * - The "kernel_heap" section contains all physical pages that are not in the
    49  *   kernel_code and pages_tbl sections, and that have not been reserved by the
    50  *   architecture specific bootloader. The reserved pages are defined in the boot_info
    51  *   structure.
     49 *   "kernel_code" and "pages_tbl" sections, and that have not been reserved.
     50 *   The reserved pages are defined in the boot_info structure.
    5251 *
    5352 * The main service provided by the PMM is the dynamic allocation of physical pages
     
    6059 *
    6160 * Another service is to register the dirty pages in a specific dirty_list, that is
    62  * also rooted in the PPM, in order to be able to save all dirty pages on disk.
     61 * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
    6362 * This dirty list is protected by a specific remote_queuelock, because it can be
    6463 * modified by a remote thread, but it contains only local pages.
     
    198197 *   . if page already dirty => do nothing
    199198 *   . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
    200  * - it releases the busylock protcting the page flags.
     199 * - it releases the busylock protecting the page flags.
    201200 * - it releases the queuelock protecting the PPM dirty_list.
    202201 *****************************************************************************************
     
    214213 *   . if page not dirty => do nothing
    215214 *   . it page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
    216  * - it releases the busylock protcting the page flags.
     215 * - it releases the busylock protecting the page flags.
    217216 * - it releases the queuelock protecting the PPM dirty_list.
    218217 *****************************************************************************************
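The three-section layout described at the top of this header can be summarised by the local PPN where each section starts. A hedged sketch of these boundaries, derived only from the two boot_info fields quoted in the comment (the <info> pointer and the rounding of the page descriptors array are illustrative assumptions, not the hal_ppm_create() code):

    // local PPN of each PPM section, for one cluster
    uint32_t code_ppn = 0;                      // "kernel_code" starts at local PPN 0
    uint32_t tbl_ppn  = info->pages_offset;     // "pages_tbl" follows the kernel code
    uint32_t tbl_npg  = ( (info->pages_nr * sizeof(page_t))      // pages used by the
                        + (1 << CONFIG_PPM_PAGE_SHIFT) - 1 )      // page descriptors
                        >> CONFIG_PPM_PAGE_SHIFT;                 // array (rounded up)
    uint32_t heap_ppn = tbl_ppn + tbl_npg;      // "kernel_heap" takes the remaining pages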
  • trunk/kernel/mm/vmm.c

    r621 r623  
    5959{
    6060    error_t   error;
    61     vseg_t  * vseg_kentry;
    6261    vseg_t  * vseg_args;
    6362    vseg_t  * vseg_envs;
     
    9190(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    9291"STACK zone too small\n");
    93 
    94     // register kentry vseg in VSL
    95     base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    96     size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
    97 
    98     vseg_kentry = vmm_create_vseg( process,
    99                                    VSEG_TYPE_CODE,
    100                                    base,
    101                                    size,
    102                                    0,             // file_offset unused
    103                                    0,             // file_size unused
    104                                    XPTR_NULL,     // mapper_xp unused
    105                                    local_cxy );
    106 
    107     if( vseg_kentry == NULL )
    108     {
    109         printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
    110         return -1;
    111     }
    112 
    113     vmm->kent_vpn_base = base;
    11492
    11593    // register args vseg in VSL
     
    162140
    163141    if( error )
    164     printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     142    {
     143        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     144        return -1;
     145    }
    165146
    166147    // initialize GPT lock
    167148    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    168149
    169     // architecture specic GPT initialisation
    170     // (For TSAR, identity map the kentry_vseg)
    171     error = hal_vmm_init( vmm );
    172 
    173     if( error )
    174     printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );
     150    // update process VMM with kernel vsegs
     151    error = hal_vmm_kernel_update( process );
     152
     153    if( error )
     154    {
     155        printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
     156        return -1;
     157    }
    175158
    176159    // initialize STACK allocator
     
    326309    }
    327310
    328     // release physical memory allocated for vseg descriptor if no MMAP type
    329     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
      311    // release physical memory allocated for the vseg descriptor if neither MMAP nor kernel type
     312    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
     313        (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    330314    {
    331315        vseg_free( vseg );
     
    606590    child_vmm->vsegs_nr = 0;
    607591
    608     // create child GPT
     592    // create the child GPT
    609593    error = hal_gpt_create( &child_vmm->gpt );
    610594
     
    639623#endif
    640624
    641         // all parent vsegs - but STACK - must be copied in child VSL
    642         if( type != VSEG_TYPE_STACK )
     625        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
     626        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
     627            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    643628        {
    644629            // allocate memory for a new child vseg
     
    726711    remote_rwlock_rd_release( parent_lock_xp );
    727712
    728     // initialize child GPT (architecture specic)
    729     // => For TSAR, identity map the kentry_vseg
    730     error = hal_vmm_init( child_vmm );
     713    // update child VMM with kernel vsegs
     714    error = hal_vmm_kernel_update( child_process );
    731715
    732716    if( error )
    733717    {
    734         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     718        printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
    735719        return -1;
    736720    }
     
    10981082        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    10991083    }
    1100     else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
     1084    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    11011085    {
    11021086        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
     
    11781162    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
     11791163    uint32_t    forks;      // actual number of pending forks
     1164    uint32_t    type;       // vseg type
    11801165
    11811166#if DEBUG_VMM_DELETE_VSEG
     
    11901175    process = cluster_get_local_process_from_pid( pid );
    11911176
    1192     if( process == NULL ) return;
     1177    if( process == NULL )
     1178    {
      1179        printk("\n[ERROR] in %s : cannot get local process descriptor\n",
     1180        __FUNCTION__ );
     1181        return;
     1182    }
    11931183
    11941184    // get pointers on local process VMM an GPT
     
    11991189    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    12001190
    1201     if( vseg == NULL ) return;
    1202 
    1203     // loop to invalidate all vseg PTEs in GPT
     1191    if( vseg == NULL )
     1192    {
      1193        printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
     1194        __FUNCTION__ );
     1195        return;
     1196    }
     1197
     1198    // get relevant vseg infos
     1199    type    = vseg->type;
    12041200    vpn_min = vseg->vpn_base;
    12051201    vpn_max = vpn_min + vseg->vpn_size;
     1202
     1203    // loop to invalidate all vseg PTEs in GPT
    12061204        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12071205    {
     
    12161214printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
    12171215#endif
    1218 
    1219 // check small page
    1220 assert( (attr & GPT_SMALL) , "an user vseg must use small pages" );
    1221 
    12221216            // unmap GPT entry in local GPT
    12231217            hal_gpt_reset_pte( gpt , vpn );
    12241218
    1225             // handle pending forks counter if
    1226             // 1) not identity mapped
    1227             // 2) reference cluster
    1228             if( ((vseg->flags & VSEG_IDENT)  == 0) &&
    1229                 (GET_CXY( process->ref_xp ) == local_cxy) )
     1219            // the allocated page is not released to KMEM for kernel vseg
     1220            if( (type != VSEG_TYPE_KCODE) &&
     1221                (type != VSEG_TYPE_KDATA) &&
     1222                (type != VSEG_TYPE_KDEV ) )
    12301223            {
     1224
     1225// FIXME This code must be completely re-written, as the actual release must depend on
     1226// - the vseg type
     1227// - the reference cluster
     1228// - the page refcount and/or the forks counter
     1229
    12311230                // get extended pointer on physical page descriptor
    12321231                page_xp  = ppm_ppn2page( ppn );
     
    12381237                lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    12391238
     1239                // get the lock protecting the page
    12401240                remote_busylock_acquire( lock_xp );
     1241
    12411242                // get pending forks counter
    12421243                forks = hal_remote_l32( forks_xp );
     1244
    12431245                if( forks )  // decrement pending forks counter
    12441246                {
     
    12631265#endif
    12641266                }
     1267
     1268                // release the lock protecting the page
    12651269                remote_busylock_release( lock_xp );
    12661270            }
     
    13111315    // return failure
    13121316    remote_rwlock_rd_release( lock_xp );
     1317
    13131318    return NULL;
    13141319
     
    13251330    vpn_t     vpn_max;
    13261331
     1332#if DEBUG_VMM_RESIZE_VSEG
     1333uint32_t   cycle = (uint32_t)hal_get_cycles();
     1334thread_t * this  = CURRENT_THREAD;
     1335if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1336printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
     1337__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1338#endif
     1339
    13271340    // get pointer on process VMM
    13281341    vmm_t * vmm = &process->vmm;
     
    13341347        vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
    13351348
    1336         if( vseg == NULL)  return EINVAL;
    1337 
    1338     // get extended pointer on VSL lock
    1339     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    1340 
    1341     // get lock protecting VSL
    1342         remote_rwlock_wr_acquire( lock_xp );
    1343 
     1349        if( vseg == NULL)
     1350    {
     1351        printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
     1352        __FUNCTION__, base , size );
     1353        return -1;
     1354    }
     1355
     1356    // resize depends on unmapped region base and size
    13441357        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
    13451358    {
     1359        printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
     1360        __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1361
    13461362        error = -1;
    13471363    }
    13481364        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
    13491365    {
     1366
     1367#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1368if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1369printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
     1370__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1371#endif
    13501372        vmm_delete_vseg( process->pid , vseg->min );
     1373
     1374#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1375if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1376printk("\n[%s] thread[%x,%x] deleted vseg\n",
     1377__FUNCTION__, this->process->pid, this->trdid );
     1378#endif
    13511379        error = 0;
    13521380    }
    13531381        else if( vseg->min == addr_min )                               // vseg must be resized
    13541382    {
    1355         // update vseg base address
     1383
     1384#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1385if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1386printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1387__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1388#endif
     1389        // update vseg min address
    13561390        vseg->min = addr_max;
    13571391
     
    13611395        vseg->vpn_base = vpn_min;
    13621396        vseg->vpn_size = vpn_max - vpn_min + 1;
     1397
     1398#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1399if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1400printk("\n[%s] thread[%x,%x] changed vseg_min\n",
     1401__FUNCTION__, this->process->pid, this->trdid );
     1402#endif
    13631403        error = 0;
    13641404    }
    13651405        else if( vseg->max == addr_max )                              // vseg must be resized
    13661406    {
     1407
     1408#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1409if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1410printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1411__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1412#endif
    13671413        // update vseg max address
    13681414        vseg->max = addr_min;
     
    13731419        vseg->vpn_base = vpn_min;
    13741420        vseg->vpn_size = vpn_max - vpn_min + 1;
     1421
     1422#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1423if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1424printk("\n[%s] thread[%x,%x] changed vseg_max\n",
     1425__FUNCTION__, this->process->pid, this->trdid );
     1426#endif
    13751427        error = 0;
     1428
    13761429    }
    13771430    else                                                          // vseg cut in three regions
    13781431    {
     1432
     1433#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1434if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1435printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1436__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1437#endif
    13791438        // resize existing vseg
    13801439        vseg->max = addr_min;
     
    13961455                               vseg->cxy );
    13971456
    1398         if( new == NULL ) error = EINVAL;
     1457#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1458if( DEBUG_VMM_RESIZE_VSEG < cycle )
      1459printk("\n[%s] thread[%x,%x] replaced vseg by two small vsegs\n",
     1460__FUNCTION__, this->process->pid, this->trdid );
     1461#endif
     1462
     1463        if( new == NULL ) error = -1;
    13991464        else              error = 0;
    14001465    }
    14011466
    1402     // release VMM lock
    1403         remote_rwlock_wr_release( lock_xp );
     1467#if DEBUG_VMM_RESIZE_VSEG
     1468if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1469printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
     1470__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1471#endif
    14041472
    14051473        return error;
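The resize logic above reduces to five cases, depending on how the unmapped region [addr_min,addr_max[ overlaps the target vseg [min,max[. A condensed sketch of the decision, using the same names as the code (the vpn_base / vpn_size updates and the debug traces are omitted):

    if( (vseg->min > addr_min) || (vseg->max < addr_max) )          // not included
    {
        error = -1;                                                 // => error
    }
    else if( (vseg->min == addr_min) && (vseg->max == addr_max) )   // exact match
    {
        vmm_delete_vseg( process->pid , vseg->min );                // => delete the vseg
    }
    else if( vseg->min == addr_min )                                // prefix unmapped
    {
        vseg->min = addr_max;                                       // => raise min
    }
    else if( vseg->max == addr_max )                                // suffix unmapped
    {
        vseg->max = addr_min;                                       // => lower max
    }
    else                                                            // hole in the middle
    {
        // shrink the existing vseg to [min,addr_min[ and call vmm_create_vseg()
        // to build a second vseg covering [addr_max,max[, as in the code above
        vseg->max = addr_min;
    }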
  • trunk/kernel/mm/vmm.h

    r614 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vseg.c

    r595 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2018,2019)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    5555        else if( vseg_type == VSEG_TYPE_FILE   ) return "FILE";
    5656        else if( vseg_type == VSEG_TYPE_REMOTE ) return "REMO";
     57        else if( vseg_type == VSEG_TYPE_KCODE  ) return "KCOD";
     58        else if( vseg_type == VSEG_TYPE_KDATA  ) return "KDAT";
     59        else if( vseg_type == VSEG_TYPE_KDEV   ) return "KDEV";
    5760    else                                     return "undefined";
    5861}
     
    142145                      VSEG_CACHE   ;
    143146    }
     147    else if( type == VSEG_TYPE_KCODE )
     148    {
     149        vseg->flags = VSEG_EXEC    |
     150                      VSEG_CACHE   |
     151                      VSEG_PRIVATE ;
     152    }
     153    else if( type == VSEG_TYPE_KDATA )
     154    {
     155        vseg->flags = VSEG_CACHE   |
     156                      VSEG_WRITE   ;
     157    }
     158    else if( type == VSEG_TYPE_KDEV )
     159    {
     160        vseg->flags = VSEG_WRITE   ;
     161    }
    144162    else
    145163    {
     
    158176
    159177    // initialize vseg with remote_read access
    160     vseg->type        =           hal_remote_l32 ( XPTR( cxy , &ptr->type        ) );
     178    vseg->type        =           hal_remote_l32( XPTR( cxy , &ptr->type        ) );
    161179    vseg->min         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min         ) );
    162180    vseg->max         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max         ) );
    163     vseg->vpn_base    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_base    ) );
    164     vseg->vpn_size    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_size    ) );
    165     vseg->flags       =           hal_remote_l32 ( XPTR( cxy , &ptr->flags       ) );
    166     vseg->file_offset =           hal_remote_l32 ( XPTR( cxy , &ptr->file_offset ) );
    167     vseg->file_size   =           hal_remote_l32 ( XPTR( cxy , &ptr->file_size   ) );
     181    vseg->vpn_base    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_base    ) );
     182    vseg->vpn_size    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_size    ) );
     183    vseg->flags       =           hal_remote_l32( XPTR( cxy , &ptr->flags       ) );
     184    vseg->file_offset =           hal_remote_l32( XPTR( cxy , &ptr->file_offset ) );
     185    vseg->file_size   =           hal_remote_l32( XPTR( cxy , &ptr->file_size   ) );
    168186        vseg->mapper_xp   = (xptr_t)  hal_remote_l64( XPTR( cxy , &ptr->mapper_xp   ) );
    169187
    170188    switch (vseg->type)
    171189    {
    172         case VSEG_TYPE_DATA:
     190        case VSEG_TYPE_DATA:      // unused
    173191        {
    174192            vseg->cxy = 0xffff;
    175193            break;
    176194        }
    177         case VSEG_TYPE_CODE:
     195        case VSEG_TYPE_CODE:      // always local
    178196        case VSEG_TYPE_STACK:
     197        case VSEG_TYPE_KCODE:
    179198        {
    180199            vseg->cxy = local_cxy;
    181200            break;
    182201        }
    183         case VSEG_TYPE_ANON:
     202        case VSEG_TYPE_ANON:      // intrinsic
    184203        case VSEG_TYPE_FILE:
    185204        case VSEG_TYPE_REMOTE:
     205        case VSEG_TYPE_KDEV:
     206        case VSEG_TYPE_KDATA:
    186207        {
    187208            vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) );
  • trunk/kernel/mm/vseg.h

    r611 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    3535
    3636/*******************************************************************************************
    37  * This enum defines the vseg types for an user process.
     37 * This enum defines the vseg types.
     38 * Note : the KDATA and KDEV types are not used by the TSAR HAL, because the accesses
     39 *        to kernel data or kernel devices are done through the DATA extension address
     40 *        register, but these types are probably required by the I86 HAL [AG].
    3841 ******************************************************************************************/
    3942
    4043typedef enum
    4144{
    42     VSEG_TYPE_CODE   = 0,          /*! executable user code   / private / localized       */
    43     VSEG_TYPE_DATA   = 1,          /*! initialized user data  / public  / distributed     */
    44     VSEG_TYPE_STACK  = 2,          /*! execution user stack   / private / localized       */
    45     VSEG_TYPE_ANON   = 3,          /*! anonymous mmap         / public  / localized       */
    46     VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
    47     VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
     45    VSEG_TYPE_CODE   = 0,          /*! executable user code     / private / localized     */
     46    VSEG_TYPE_DATA   = 1,          /*! initialized user data    / public  / distributed   */
     47    VSEG_TYPE_STACK  = 2,          /*! execution user stack     / private / localized     */
     48    VSEG_TYPE_ANON   = 3,          /*! anonymous mmap           / public  / localized     */
     49    VSEG_TYPE_FILE   = 4,          /*! file mmap                / public  / localized     */
     50    VSEG_TYPE_REMOTE = 5,          /*! remote mmap              / public  / localized     */
     51
     52    VSEG_TYPE_KCODE  = 6,          /*! executable kernel code   / private / localized     */
     53    VSEG_TYPE_KDATA  = 7,          /*! initialized kernel data  / private / localized     */
     54    VSEG_TYPE_KDEV   = 8,          /*! kernel peripheral device / public  / localized     */
    4855}
    4956vseg_type_t;
     
    6067#define VSEG_PRIVATE  0x0010       /*! should not be accessed from another cluster        */
    6168#define VSEG_DISTRIB  0x0020       /*! physically distributed on all clusters             */
    62 #define VSEG_IDENT    0x0040       /*! identity mapping                                   */
    6369
    6470/*******************************************************************************************
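The three new kernel types are excluded from several user-oriented paths in vmm.c above (vseg descriptor release, copy of the parent VSL during fork, physical page release in vmm_delete_vseg). A small helper predicate could factor this triple test; the vseg_is_kernel() name is an illustrative assumption, not an existing function:

    // returns true when the vseg type is one of the three kernel types
    // introduced by this changeset (KCODE / KDATA / KDEV)
    static inline bool_t vseg_is_kernel( vseg_type_t type )
    {
        return ( (type == VSEG_TYPE_KCODE) ||
                 (type == VSEG_TYPE_KDATA) ||
                 (type == VSEG_TYPE_KDEV ) );
    }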
  • trunk/kernel/syscalls/shared_include/shared_almos.h

    r611 r623  
    5353    DISPLAY_BUSYLOCKS         = 8,
    5454    DISPLAY_MAPPER            = 9,
     55    DISPLAY_BARRIER           = 10,
    5556}
    5657display_type_t;
  • trunk/kernel/syscalls/shared_include/shared_mman.h

    r594 r623  
    22 * shred_mman.h - Shared structures & mnemonics used by the <mman.h> user library.
    33 *
    4  * Author  Alain Greiner (2016,2017,2018)
     4 * Author  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2626
    2727/*******************************************************************************************
    28  * These structure are used by the mmap() syscall().
      28 * This structure is used by the mmap() syscall.
    2929 ******************************************************************************************/
    3030
  • trunk/kernel/syscalls/sys_creat.c

    r457 r623  
    22 * sys_creat.c - create a file
    33 *
    4  * Author    Alain Greiner (2016,2017)
      4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/sys_display.c

    r619 r623  
    3131#include <string.h>
    3232#include <shared_syscalls.h>
     33#include <remote_barrier.h>
    3334#include <vfs.h>
    3435#include <mapper.h>
     
    5354    else if( type == DISPLAY_BUSYLOCKS         ) return "BUSYLOCKS";
    5455    else if( type == DISPLAY_MAPPER            ) return "MAPPER";
     56    else if( type == DISPLAY_BARRIER           ) return "BARRIER";
    5557    else                                         return "undefined";
    5658}
     
    8183#endif
    8284
    83     ////////////////////////////
    84     if( type == DISPLAY_STRING )
     85    switch( type )
    8586    {
    86         char      kbuf[512];
    87         uint32_t  length;
    88 
    89         char    * string = (char *)arg0;
    90 
    91         // check string in user space
    92         error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
    93 
    94         if( error )
    95         {
     87        ////////////////////
     88        case DISPLAY_STRING:
     89        {
     90            char      kbuf[512];
     91            uint32_t  length;
     92
     93            char    * string = (char *)arg0;
     94
     95            // check string in user space
     96            error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
     97
     98            if( error )
     99            {
    96100
    97101#if DEBUG_SYSCALLS_ERROR
     
    99103__FUNCTION__ , (intptr_t)arg0 );
    100104#endif
     105                this->errno = EINVAL;
     106                return -1;
     107            }
     108
      109            // check string length
     110            length = hal_strlen_from_uspace( string );
     111
     112            if( length >= 512 )
     113            {
     114
     115#if DEBUG_SYSCALLS_ERROR
     116printk("\n[ERROR] in %s for STRING : string length %d too large\n",
     117__FUNCTION__ , length );
     118#endif
     119                this->errno = EINVAL;
     120                return -1;
     121            }
     122
     123            // copy string to kernel space
     124            hal_strcpy_from_uspace( kbuf , string , 512 );
     125
     126            // print message on TXT0 kernel terminal
     127            printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
     128
     129            break;
     130        }
     131        /////////////////
     132        case DISPLAY_VMM:
     133        {
     134            cxy_t cxy = (cxy_t)arg0;
     135            pid_t pid = (pid_t)arg1;
     136
     137            // check cxy argument
     138                if( cluster_is_undefined( cxy ) )
     139            {
     140
     141#if DEBUG_SYSCALLS_ERROR
     142printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
     143__FUNCTION__ , pid , cxy );
     144#endif
     145                this->errno = EINVAL;
     146                return -1;
     147            }
     148
     149            // get extended pointer on process PID in cluster CXY
     150            xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
     151
     152                if( process_xp == XPTR_NULL )
     153            {
     154
     155#if DEBUG_SYSCALLS_ERROR
     156printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
     157__FUNCTION__ , pid , cxy );
     158#endif
     159                this->errno = EINVAL;
     160                return -1;
     161            }
     162
     163            // get local pointer on process
     164            process_t * process = (process_t *)GET_PTR( process_xp );
     165
     166            // call kernel function
     167            if( cxy == local_cxy )
     168            {
     169                    vmm_display( process , true );
     170            }
     171            else
     172            {
     173                rpc_vmm_display_client( cxy , process , true );
     174            }
     175
     176            break;
     177        }
     178        ///////////////////
     179        case DISPLAY_SCHED:
     180        {
     181            cxy_t cxy = (cxy_t)arg0;
     182            lid_t lid = (lid_t)arg1;
     183
     184            // check cxy argument
     185                if( cluster_is_undefined( cxy ) )
     186            {
     187
     188#if DEBUG_SYSCALLS_ERROR
     189printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n",
     190__FUNCTION__ , cxy );
     191#endif
     192                this->errno = EINVAL;
     193                return -1;
     194            }
     195
     196            // check lid argument
     197            if( lid >= LOCAL_CLUSTER->cores_nr )
     198            {
     199
     200#if DEBUG_SYSCALLS_ERROR
     201printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n",
     202__FUNCTION__ , lid );
     203#endif
     204                this->errno = EINVAL;
     205                return -1;
     206            }
     207
     208            if( cxy == local_cxy )
     209            {
     210                    sched_display( lid );
     211            }
     212            else
     213            {
     214                sched_remote_display( cxy , lid );
     215            }
     216
     217            break;
     218        }
     219        ///////////////////////////////
     220        case DISPLAY_CLUSTER_PROCESSES:
     221        {
     222            cxy_t  cxy   = (cxy_t)arg0;
     223            bool_t owned = (bool_t)arg1;
     224
     225            // check cxy argument
     226                if( cluster_is_undefined( cxy ) )
     227            {
     228
     229#if DEBUG_SYSCALLS_ERROR
     230printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n",
     231__FUNCTION__ , cxy );
     232#endif
     233                this->errno = EINVAL;
     234                return -1;
     235            }
     236
     237            cluster_processes_display( cxy , owned );
     238
     239            break;
     240        }
     241        /////////////////
     242        case DISPLAY_VFS:
     243        {
     244            vfs_display( process->vfs_root_xp );
     245
     246            break;
     247        }
     248        ///////////////////
     249        case DISPLAY_CHDEV:
     250        {
     251            chdev_dir_display();
     252
     253            break;
     254        }
     255        ///////////////////////////
     256        case DISPLAY_TXT_PROCESSES:
     257        {
     258            uint32_t txt_id = (uint32_t)arg0;
     259
     260            // check argument
     261                if( txt_id >= LOCAL_CLUSTER->nb_txt_channels )
     262            {
     263
     264#if DEBUG_SYSCALLS_ERROR
     265printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n",
     266__FUNCTION__ , txt_id );
     267#endif
     268                this->errno = EINVAL;
     269                return -1;
     270            }
     271
     272            process_txt_display( txt_id );
     273
     274            break;
     275        }
     276        //////////////////
     277        case DISPLAY_DQDT:
     278        {
     279            dqdt_display();
     280
     281            break;
     282        }
     283        ///////////////////////
     284        case DISPLAY_BUSYLOCKS:
     285        {
     286            pid_t   pid   = (pid_t)arg0;
     287            trdid_t trdid = (trdid_t)arg1;
     288
     289            // get extended pointer on target thread
     290            xptr_t thread_xp = thread_get_xptr( pid , trdid );
     291
     292            if( thread_xp == XPTR_NULL )
     293            {
     294
     295#if DEBUG_SYSCALLS_ERROR
     296printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n",
     297__FUNCTION__ , pid, trdid );
     298#endif
     299                this->errno = EINVAL;
     300                return -1;
     301            }
     302
     303            thread_display_busylocks( thread_xp , __FUNCTION__ );
     304
     305            break;
     306        }
     307        ////////////////////
     308        case DISPLAY_MAPPER:
     309        {
     310            xptr_t        root_inode_xp;
     311            xptr_t        inode_xp;
     312            cxy_t         inode_cxy;
     313            vfs_inode_t * inode_ptr;
     314            xptr_t        mapper_xp;
     315            mapper_t    * mapper_ptr;
     316
     317            char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
     318
     319            char     * path    = (char *)arg0;
     320            uint32_t   page_id = (uint32_t)arg1;
     321            uint32_t   nbytes  = (uint32_t)arg2;
     322
     323            // check pathname length
     324            if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH )
     325            {
     326
     327#if DEBUG_SYSCALLS_ERROR
     328printk("\n[ERROR] in %s for MAPPER : pathname too long\n",
     329 __FUNCTION__ );
     330#endif
     331                this->errno = ENFILE;
     332                return -1;
     333            }
     334
     335            // copy pathname in kernel space
     336            hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
     337
     338            // compute root inode for pathname
     339            if( kbuf[0] == '/' )                        // absolute path
     340            {
     341                // use extended pointer on VFS root inode
     342                root_inode_xp = process->vfs_root_xp;
     343            }
     344            else                                        // relative path
     345            {
     346                // get cluster and local pointer on reference process
     347                xptr_t      ref_xp  = process->ref_xp;
     348                process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     349                cxy_t       ref_cxy = GET_CXY( ref_xp );
     350
     351                // get extended pointer on CWD inode
     352                root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) );
     353            }
     354
     355            // get extended pointer on target inode
     356            error = vfs_lookup( root_inode_xp,
     357                                kbuf,
     358                                0,
     359                                &inode_xp,
     360                                NULL );
     361            if( error )
     362                {
     363
     364#if DEBUG_SYSCALLS_ERROR
      365printk("\n[ERROR] in %s for MAPPER : cannot find inode <%s>\n",
     366__FUNCTION__ , kbuf );
     367#endif
     368                        this->errno = ENFILE;
     369                        return -1;
     370                }
     371   
     372            // get target inode cluster and local pointer
     373            inode_cxy = GET_CXY( inode_xp );
     374            inode_ptr = GET_PTR( inode_xp );
     375
     376            // get extended pointer on target mapper
     377            mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
     378            mapper_xp  = XPTR( inode_cxy , mapper_ptr );
     379
     380            // display mapper
     381            error = mapper_display_page( mapper_xp , page_id , nbytes );
     382
     383            if( error )
     384                {
     385
     386#if DEBUG_SYSCALLS_ERROR
     387printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
     388__FUNCTION__ , page_id );
     389#endif
     390                        this->errno = ENFILE;
     391                        return -1;
     392                }
     393
     394            break;
     395        }
     396        /////////////////////
     397        case DISPLAY_BARRIER:
     398        {
     399            // get target process PID
     400            pid_t pid = (pid_t)arg0;
     401
     402            // get pointers on owner process
     403            xptr_t      process_xp  = cluster_get_reference_process_from_pid( pid );
     404            process_t * process_ptr = GET_PTR( process_xp );
     405            cxy_t       process_cxy = GET_CXY( process_xp );
     406
     407            if( process_xp == XPTR_NULL )
     408            {
     409
     410#if DEBUG_SYSCALLS_ERROR
     411printk("\n[ERROR] in %s for BARRIER : process %x not found\n",
     412__FUNCTION__ , pid );
     413#endif
     414                this->errno = EINVAL;
     415                return -1;
     416            }
     417
     418            // get extended pointer on root of list of barriers
     419            xptr_t root_xp = XPTR( process_cxy , &process_ptr->barrier_root );
     420
     421            if( xlist_is_empty( root_xp ) )
     422            {
     423
     424#if DEBUG_SYSCALLS_ERROR
     425printk("\n[ERROR] in %s for BARRIER : no registered barrier in process %x\n",
     426__FUNCTION__ , pid );
     427#endif
     428                this->errno = EINVAL;
     429                return -1;
     430            }
     431
     432            // get extended pointer on first registered generic barrier descriptor
     433            xptr_t gen_barrier_xp  = XLIST_FIRST( root_xp , generic_barrier_t , list );
     434
     435            // display barrier state
     436            generic_barrier_display( gen_barrier_xp );
     437
     438            break;
     439        }
     440        ////////
     441        default:
     442        {
     443
     444#if DEBUG_SYSCALLS_ERROR
     445printk("\n[ERROR] in %s : undefined display type %d\n",
     446        __FUNCTION__ , type );
     447#endif
    101448            this->errno = EINVAL;
    102449            return -1;
    103450        }
    104 
    105         // ckeck string length
    106         length = hal_strlen_from_uspace( string );
    107 
    108         if( length >= 512 )
    109         {
    110 
    111 #if DEBUG_SYSCALLS_ERROR
    112 printk("\n[ERROR] in %s for STRING : string length %d too large\n",
    113 __FUNCTION__ , length );
    114 #endif
    115             this->errno = EINVAL;
    116             return -1;
    117         }
    118 
    119         // copy string to kernel space
    120         hal_strcpy_from_uspace( kbuf , string , 512 );
    121 
    122         // print message on TXT0 kernel terminal
    123         printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
    124     }
    125     //////////////////////////////
    126     else if( type == DISPLAY_VMM )
    127     {
    128         cxy_t cxy = (cxy_t)arg0;
    129         pid_t pid = (pid_t)arg1;
    130 
    131         // check cxy argument
    132             if( cluster_is_undefined( cxy ) )
    133         {
    134 
    135 #if DEBUG_SYSCALLS_ERROR
    136 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
    137 __FUNCTION__ , pid , cxy );
    138 #endif
    139             this->errno = EINVAL;
    140             return -1;
    141         }
    142 
    143         // get extended pointer on process PID in cluster CXY
    144         xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
    145 
    146             if( process_xp == XPTR_NULL )
    147         {
    148 
    149 #if DEBUG_SYSCALLS_ERROR
    150 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
    151 __FUNCTION__ , pid , cxy );
    152 #endif
    153             this->errno = EINVAL;
    154             return -1;
    155         }
    156 
    157         // get local pointer on process
    158         process_t * process = (process_t *)GET_PTR( process_xp );
    159 
    160         // call kernel function
    161         if( cxy == local_cxy )
    162         {
    163                 vmm_display( process , true );
    164         }
    165         else
    166         {
    167             rpc_vmm_display_client( cxy , process , true );
    168         }
    169     }
    170     ////////////////////////////////
    171     else if( type == DISPLAY_SCHED )
    172     {
    173         cxy_t cxy = (cxy_t)arg0;
    174         lid_t lid = (lid_t)arg1;
    175 
    176         // check cxy argument
    177             if( cluster_is_undefined( cxy ) )
    178         {
    179 
    180 #if DEBUG_SYSCALLS_ERROR
    181 printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n",
    182 __FUNCTION__ , cxy );
    183 #endif
    184             this->errno = EINVAL;
    185             return -1;
    186         }
    187 
    188         // check lid argument
    189         if( lid >= LOCAL_CLUSTER->cores_nr )
    190         {
    191 
    192 #if DEBUG_SYSCALLS_ERROR
    193 printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n",
    194 __FUNCTION__ , lid );
    195 #endif
    196             this->errno = EINVAL;
    197             return -1;
    198         }
    199 
    200         if( cxy == local_cxy )
    201         {
    202                 sched_display( lid );
    203         }
    204         else
    205         {
    206             sched_remote_display( cxy , lid );
    207         }
    208     }
    209     ////////////////////////////////////////////
    210     else if( type == DISPLAY_CLUSTER_PROCESSES )
    211     {
    212         cxy_t  cxy   = (cxy_t)arg0;
    213         bool_t owned = (bool_t)arg1;
    214 
    215         // check cxy argument
    216             if( cluster_is_undefined( cxy ) )
    217         {
    218 
    219 #if DEBUG_SYSCALLS_ERROR
    220 printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n",
    221 __FUNCTION__ , cxy );
    222 #endif
    223             this->errno = EINVAL;
    224             return -1;
    225         }
    226 
    227         cluster_processes_display( cxy , owned );
    228     }
    229     //////////////////////////////
    230     else if( type == DISPLAY_VFS )
    231     {
    232         vfs_display( process->vfs_root_xp );
    233     }
    234     ////////////////////////////////
    235     else if( type == DISPLAY_CHDEV )
    236     {
    237         chdev_dir_display();
    238     }
    239     ////////////////////////////////////////
    240     else if( type == DISPLAY_TXT_PROCESSES )
    241     {
    242         uint32_t txt_id = (uint32_t)arg0;
    243 
    244         // check argument
    245             if( txt_id >= LOCAL_CLUSTER->nb_txt_channels )
    246         {
    247 
    248 #if DEBUG_SYSCALLS_ERROR
    249 printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n",
    250 __FUNCTION__ , txt_id );
    251 #endif
    252             this->errno = EINVAL;
    253             return -1;
    254         }
    255 
    256         process_txt_display( txt_id );
    257     }
    258     ///////////////////////////////
    259     else if( type == DISPLAY_DQDT )
    260     {
    261         dqdt_display();
    262     }
    263     ////////////////////////////////////
    264     else if( type == DISPLAY_BUSYLOCKS )
    265     {
    266         pid_t   pid   = (pid_t)arg0;
    267         trdid_t trdid = (trdid_t)arg1;
    268 
    269         // get extended pointer on target thread
    270         xptr_t thread_xp = thread_get_xptr( pid , trdid );
    271 
    272         if( thread_xp == XPTR_NULL )
    273         {
    274 
    275 #if DEBUG_SYSCALLS_ERROR
    276 printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n",
    277 __FUNCTION__ , pid, trdid );
    278 #endif
    279             this->errno = EINVAL;
    280             return -1;
    281         }
    282 
    283         thread_display_busylocks( thread_xp , __FUNCTION__ );
    284     }
    285     /////////////////////////////////
    286     else if( type == DISPLAY_MAPPER )
    287     {
    288         xptr_t        root_inode_xp;
    289         xptr_t        inode_xp;
    290         cxy_t         inode_cxy;
    291         vfs_inode_t * inode_ptr;
    292         xptr_t        mapper_xp;
    293         mapper_t    * mapper_ptr;
    294 
    295         char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
    296 
    297         char     * path    = (char *)arg0;
    298         uint32_t   page_id = (uint32_t)arg1;
    299         uint32_t   nbytes  = (uint32_t)arg2;
    300 
    301         // check pathname length
    302         if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH )
    303         {
    304 
    305 #if DEBUG_SYSCALLS_ERROR
    306 printk("\n[ERROR] in %s for MAPPER : pathname too long\n",
    307  __FUNCTION__ );
    308 #endif
    309             this->errno = ENFILE;
    310             return -1;
    311         }
    312 
    313         // copy pathname in kernel space
    314         hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
    315 
    316         // compute root inode for pathname
    317         if( kbuf[0] == '/' )                        // absolute path
    318         {
    319             // use extended pointer on VFS root inode
    320             root_inode_xp = process->vfs_root_xp;
    321         }
    322         else                                        // relative path
    323         {
    324             // get cluster and local pointer on reference process
    325             xptr_t      ref_xp  = process->ref_xp;
    326             process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    327             cxy_t       ref_cxy = GET_CXY( ref_xp );
    328 
    329             // use extended pointer on CWD inode
    330             root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) );
    331         }
    332 
    333         // get extended pointer on target inode
    334         error = vfs_lookup( root_inode_xp,
    335                             kbuf,
    336                             0,
    337                             &inode_xp,
    338                             NULL );
    339         if( error )
    340             {
    341 
    342 #if DEBUG_SYSCALLS_ERROR
    343 printk("\n[ERROR] in %s for MAPPER : cannot find inode <%s>\n",
    344 __FUNCTION__ , kbuf );
    345 #endif
    346                     this->errno = ENFILE;
    347                     return -1;
    348             }
    349    
    350         // get target inode cluster and local pointer
    351         inode_cxy = GET_CXY( inode_xp );
    352         inode_ptr = GET_PTR( inode_xp );
    353 
    354         // get extended pointer on target mapper
    355         mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
    356         mapper_xp  = XPTR( inode_cxy , mapper_ptr );
    357 
    358         // display mapper
    359         error = mapper_display_page( mapper_xp , page_id , nbytes );
    360 
    361         if( error )
    362             {
    363 
    364 #if DEBUG_SYSCALLS_ERROR
    365 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
    366 __FUNCTION__ , page_id );
    367 #endif
    368                     this->errno = ENFILE;
    369                     return -1;
    370             }
    371     }
    372     ////
    373     else
    374     {
    375 
    376 #if DEBUG_SYSCALLS_ERROR
    377 printk("\n[ERROR] in %s : undefined display type %d\n",
    378         __FUNCTION__ , type );
    379 #endif
    380         this->errno = EINVAL;
    381         return -1;
    382     }
     451    }  // end switch on type
    383452
    384453#if (DEBUG_SYS_DISPLAY || CONFIG_INSTRUMENTATION_SYSCALLS)
  • trunk/kernel/syscalls/sys_mmap.c

    r611 r623  
    5656
    5757#if DEBUG_SYS_MMAP
    58 tm_start = hal_get_cycles();
    59 if ( DEBUG_SYS_MMAP < tm_start )
     58if( DEBUG_SYS_MMAP < tm_start )
    6059printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    6160__FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start );
     
    314313#endif
    315314
     315#if CONFIG_INSTRUMENTATION_SYSCALLS
     316hal_atomic_add( &syscalls_cumul_cost[SYS_MMAP] , tm_end - tm_start );
     317hal_atomic_add( &syscalls_occurences[SYS_MMAP] , 1 );
     318#endif
     319
    316320#if DEBUG_SYS_MMAP
    317 if ( DEBUG_SYS_MMAP < tm_start )
     321if ( DEBUG_SYS_MMAP < tm_end )
    318322printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / base %x / size %d / cycle %d\n",
    319323__FUNCTION__, process->pid, this->trdid,
  • trunk/kernel/syscalls/sys_munmap.c

    r506 r623  
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_irqmask.h>
    2728#include <shared_syscalls.h>
    2829#include <errno.h>
     
    4142{
    4243    error_t       error;
     44    vseg_t      * vseg;
     45    reg_t         save_sr;      // required to enable IRQs
    4346
    4447        thread_t    * this    = CURRENT_THREAD;
    4548        process_t   * process = this->process;
    4649
     50#if (DEBUG_SYS_MUNMAP || CONFIG_INSTRUMENTATION_SYSCALLS)
     51uint64_t     tm_start = hal_get_cycles();
     52#endif
     53
    4754#if DEBUG_SYS_MUNMAP
    48 uint64_t tm_start;
    49 uint64_t tm_end;
    50 tm_start = hal_get_cycles();
    5155if( DEBUG_SYS_MUNMAP < tm_start )
    52 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n"
     56printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
    5357__FUNCTION__ , this, process->pid, (uint32_t)tm_start );
    5458#endif
     59
     60    // check user buffer is mapped
     61    error = vmm_get_vseg( process , (intptr_t)vaddr, &vseg );
     62
     63    if( error )
     64    {
     65
     66#if DEBUG_SYSCALLS_ERROR
     67printk("\n[ERROR] in %s : thread[%x,%x] / user buffer %x not mapped\n",
     68__FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr );
     69vmm_display( process , false );
     70#endif
     71                this->errno = EINVAL;
     72                return -1;
     73    }
     74
     75    // enable IRQs
     76    hal_enable_irq( &save_sr );
    5577
    5678    // call relevant kernel function
     
    6789    }
    6890
     91    // restore IRQs
     92    hal_restore_irq( save_sr );
     93
     94#if (DEBUG_SYS_MUNMAP || CONFIG_INSTRUMENTATION_SYSCALLS)
     95uint64_t     tm_end = hal_get_cycles();
     96#endif
     97
     98#if CONFIG_INSTRUMENTATION_SYSCALLS
     99hal_atomic_add( &syscalls_cumul_cost[SYS_MUNMAP] , tm_end - tm_start );
     100hal_atomic_add( &syscalls_occurences[SYS_MUNMAP] , 1 );
     101#endif
     102
    69103#if DEBUG_SYS_MUNMAP
    70 tm_end = hal_get_cycles();
    71104if( DEBUG_SYS_MUNMAP < tm_start )
    72 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n"
     105printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
    73106__FUNCTION__ , this, process->pid, (uint32_t)tm_end );
    74107#endif
  • trunk/kernel/syscalls/sys_place_fork.c

    r584 r623  
    22 * sys_place_fork.c - select the target cluster for the next fork() system call.
    33 *
    4  * Author    Alain Greiner (2016,2017)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/sys_write.c

    r610 r623  
    173173        }
    174174
    175         // update size field in inode if required
    176         xptr_t   size_xp    = XPTR( file_cxy , &inode_ptr->size );
    177         uint32_t inode_size = hal_remote_l32( size_xp );
    178         if ( (file_offset + count) > inode_size )
    179         {
    180             hal_remote_s32( size_xp , file_offset + count );
    181         }
     175        // update file size in inode descriptor
     176        // only if (file_offset + count) > current_size
     177        // note: the parent directory entry in mapper will
     178        // be updated by the close syscall     
     179        xptr_t inode_xp = XPTR( file_cxy , inode_ptr );
     180        vfs_inode_update_size( inode_xp , file_offset + count );
     181
    182182    }
    183183    else if( file_type == INODE_TYPE_DEV )  // write to TXT device
  • trunk/kernel/syscalls/syscalls.h

    r619 r623  
    236236/******************************************************************************************
    237237 * [15] This function writes bytes to an open file identified by its file descriptor.
    238  * The file can be a regular file or character oriented device.
     238 * The file can be a regular file or character oriented device. For a regular file,
     239 * the target inode "size" field is updated if (offset + count) is larger than the
     240 * current "size" value. The size value registered in the mapper(s) of the parent
     241 * directory(ies) is not modified; it will be asynchronously updated when the file is closed.
    239242 * IRQs are enabled during this system call.
    240243 ******************************************************************************************
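
As a reading aid, the rule stated above reduces to the following minimal sketch (illustrative only; the variable names are not the kernel's):

    // grow-only update of the inode size after a successful write
    uint32_t new_size = file_offset + count;              // last written byte + 1
    if( new_size > inode_size ) inode_size = new_size;    // never shrinks the file
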
     
    329332
    330333/******************************************************************************************
    331  * [23] This function open a directory, that must exist in the file system, returning
    332  * a DIR pointer on the dirent array in user space.
     334 * [23] This function creates a user-level directory descriptor (including the associated
     335 * array of user-level dirents), and initialises it from the kernel directory mapper
     336 * (that contains all entries in this directory). The directory is identified by the <pathname>
     337 * argument. If the corresponding inode is missing in the Inode Tree, the inode is created,
     338 * but the directory must exist in the file system.
     339 * It returns a DIR pointer <dirp> on the dirent array in user space.
    333340 ******************************************************************************************
    334341 * @ pathname   : [in]  pathname (can be relative or absolute).
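
A minimal user-level usage sketch for this call (illustrative; it assumes the usual opendir()/readdir()/closedir() wrappers and a struct dirent with a d_name field are available in the mini-libc):

    DIR           * dirp;
    struct dirent * entry;

    dirp = opendir( "/bin/user" );                 // pathname is illustrative
    if( dirp == NULL ) return -1;

    while( (entry = readdir( dirp )) != NULL )     // scan the user-space dirent array
    {
        printf("%s\n", entry->d_name );
    }

    closedir( dirp );                              // release the user-level descriptor
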
  • trunk/libs/libalmosmkh/almosmkh.c

    r611 r623  
    288288                             (reg_t)page_id,
    289289                             (reg_t)nbytes );
     290}
     291
     292///////////////////////////////////////
     293int display_barrier( unsigned int pid )
     294{
     295    return hal_user_syscall( SYS_DISPLAY,
     296                             DISPLAY_BARRIER,
     297                             (reg_t)pid, 0, 0 );
    290298}
    291299
  • trunk/libs/libalmosmkh/almosmkh.h

    r611 r623  
    227227                    unsigned int  nbytes);
    228228
     229/***************************************************************************************
     230 * This debug syscall displays on the kernel terminal TXT0
     231 * the state of the barrier used by the process identified by the <pid> argument.
     232 * It can be called by any thread running in any cluster.
     233 ***************************************************************************************
     234 * @ pid      : [in] process identifier.
     235 * @ return 0 if success / return -1 if illegal arguments.
     236 **************************************************************************************/
     237int display_barrier( unsigned int pid );
     238
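
A possible calling sketch from application code (illustrative; getpid() is used to target the calling process and the error message is arbitrary):

    unsigned int pid = getpid();           // calling process identifier
    if( display_barrier( pid ) )           // dump the barrier state on TXT0
    {
        printf("cannot display barrier state for process %x\n", pid );
    }
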
    229239/*****************************************************************************************
    230240* This debug syscall is used to activate / deactivate the context switches trace
  • trunk/libs/mini-libc/mman.h

    r597 r623  
    3838 * virtual space, as defined by the arguments.
    3939 *****************************************************************************************
    40  * @ addr    : requested address in virtual space / unused : should be NULL.
     40 * @ addr    : requested address in virtual space / unsupported : should be NULL.
    4141 * @ length  : requested number of bytes.
    4242 * @ prot    : access mode bit vector (PROT_EXEC / PROT_READ / PROT_WRITE)
    43  * @ flags   : bit_vector (MAP_FILE / MAP_ANON / MAPREMOTE / MAP_PRIVATE / MAP_SHARED)
     43 * @ flags   : bit_vector (MAP_FILE / MAP_ANON / MAP_REMOTE / MAP_PRIVATE / MAP_SHARED)
    4444 * @ fdid    : file descriptor index (if MAP_FILE).
    4545 * @ offset  : offset in file (if MAP_FILE).
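
For reference, a hedged sketch of the MAP_FILE pattern, mirroring what the ksh cat command does further below (fd is assumed to be an already open file descriptor and size the file size obtained from stat(); error checking is omitted):

    void * buf = mmap( NULL ,                      // addr must be NULL
                       size ,                      // number of bytes to map
                       PROT_READ | PROT_WRITE ,    // access rights
                       MAP_PRIVATE ,               // MAP_FILE is the default type
                       fd ,                        // open file descriptor
                       0 );                        // offset in file

    // ... use the mapped buffer ...

    munmap( buf , size );                          // release the mapping
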
  • trunk/libs/mini-libc/stdio.c

    r610 r623  
    3535////////////////////////////////////////////////////////////////////////////////////////
    3636
     37// This user space array registers all FILE descriptors opened by a given process
    3738FILE open_file_array[MAX_OPEN_FILE_PER_PROCESS];  // array of open files structures
    3839
     
    340341    if( mode != NULL )
    341342    {
    342         printf("\n[ERROR] in %s : the mode argument must be NULL\n", __FUNCTION__ );
     343        printf("\n[%s] error : the mode argument must be NULL\n", __FUNCTION__ );
    343344        return NULL;
    344345    }
     
    351352    if( fd < 0 )
    352353    {
    353         printf("\n[ERROR] in %s : file %s not found\n", __FUNCTION__ , pathname );
     354        printf("\n[%s] error : file <%s> not found\n", __FUNCTION__ , pathname );
    354355        return NULL;
    355356    }
    356357    if( fd >= MAX_OPEN_FILE_PER_PROCESS )
    357358    {
    358         printf("\n[ERROR] in %s : not enough space for file %s\n", __FUNCTION__ , pathname );
     359        printf("\n[%s] error : not enough space for file <%s>\n", __FUNCTION__ , pathname );
    359360        return NULL;
    360361    }
     
    365366
    366367    return &open_file_array[fd];
     368
    367369}  // end fopen()
    368370
     
    376378    int fd = stream->fd;
    377379
    378     // remove stream from open_file_array[]
     380    // remove stream from user open_file_array[]
    379381    open_file_array[fd].key = 0;
    380382   
    381     return close( fd );
     383    // close the kernel file descriptor
     384    if( close( fd ) )
     385    {
     386        printf("\n[%s] error : cannot close file %d\n", __FUNCTION__ , fd );
     387        return -1;
     388    }
     389
     390    return 0;
     391
    382392}  // end fclose()
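
For reference, a short usage sketch following the conventions enforced above (the mode argument must be NULL; the pathname is illustrative):

    FILE * stream = fopen( "home/results" , NULL );    // mode must be NULL
    if( stream == NULL ) return -1;

    // ... access the file through the stream ...

    if( fclose( stream ) ) return -1;                  // returns 0 on success
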
    383393
     
    407417        // get file descriptor from file pointer
    408418        fd = stream->fd;
    409        
     419
     420        // set terminating NUL
    410421        string[count] = 0;
    411422
     423printf("\n[%s] fd = %d for string <%s>\n", __FUNCTION__, fd, string );
     424
    412425        return write( fd , &string , count );
    413426    }
  • trunk/libs/mini-libc/stdio.h

    r610 r623  
    4040 ********************************************************************************************/
    4141
    42 typedef struct file_s
     42typedef struct stream_s
    4343{
    44     int fd;
    45     int key;
     44    int fd;          // index in both the kernel fd_array[] and the user open_file_array[]
     45    int key;         // entry valid in open_file_array[] when (key == VALID_OPEN_FILE)
    4646}
    4747FILE;
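
A sketch of the convention carried by these two fields, as fopen() could apply it (illustrative; it relies on the VALID_OPEN_FILE marker mentioned in the comment above):

    // fopen() gets a kernel file descriptor index <fd> from open(),
    // then registers the stream at the same index in the user array
    FILE * stream = &open_file_array[fd];
    stream->fd  = fd;                       // same index as in the kernel fd_array[]
    stream->key = VALID_OPEN_FILE;          // marks the entry as valid
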
  • trunk/params-hard.mk

    r620 r623  
    22
    33ARCH      = /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
    4 X_SIZE    = 2
    5 Y_SIZE    = 2
     4X_SIZE    = 1
     5Y_SIZE    = 1
    66NB_PROCS  = 1
    77NB_TTYS   = 3
  • trunk/user/init/init.c

    r588 r623  
    2626// and avoid the hard_config include [AG]
    2727
    28 // TODO introduce a communication channel between INIT and KSH
    29 // to allow KSH to signal INIT the exec completion.
    30 
    3128////////////////
    3229int main( void )
     
    4037
    4138#if DEBUG_PROCESS_INIT
    42 display_string("[INIT] process enters");
     39display_string("[init] process enters");
    4340#endif
    4441
     
    5956        {
    6057            // INIT display error message 
    61             snprintf( string , 64 , "[INIT ERROR] cannot fork child[%d] => suicide" , i );
     58            snprintf( string , 64 , "[init ERROR] cannot fork child[%d] => suicide" , i );
    6259            display_string( string );
    6360
     
    7471                // CHILD[i] display error message
    7572                snprintf( string , 64 ,
    76                 "[INIT ERROR] CHILD[%d] cannot exec KSH / ret_exec = %d" , i , ret_exec );
     73                "[init ERROR] CHILD[%d] cannot exec KSH / ret_exec = %d" , i , ret_exec );
    7774                display_string( string );
    7875            }
     
    8178        {
    8279            // INIT display CHILD[i] process PID
    83             snprintf( string , 64 , "[INIT] created KSH[%d] / pid = %x", i , ret_fork );
     80            snprintf( string , 64 , "[init] created KSH[%d] / pid = %x", i , ret_fork );
    8481            display_string( string );
    8582
     
    128125        {
    129126            // display string to report unexpected KSH process block
    130             snprintf( string , 64 , "[INIT] KSH process %x stopped => unblock it" , rcv_pid );
     127            snprintf( string , 64 , "[init] KSH process %x stopped => unblock it" , rcv_pid );
    131128            display_string( string );
    132129
     
    138135        {
    139136            // display string to report KSH process termination
    140             snprintf( string , 64 , "[INIT] KSH process %x terminated => recreate", rcv_pid );
     137            snprintf( string , 64 , "[init] KSH process %x terminated => recreate", rcv_pid );
    141138            display_string( string );
    142139
     
    147144            {
    148145                // INIT display error message
    149                 snprintf( string , 64 , "[INIT ERROR] cannot fork child => suicide");
     146                snprintf( string , 64 , "[init ERROR] cannot fork child => suicide");
    150147                display_string( string );
    151148
     
    161158                {
    162159                    // CHILD display error message on TXT0 terminal
    163                     snprintf( string , 64 , "[INIT ERROR] CHILD cannot exec KSH" );
     160                    snprintf( string , 64 , "[init ERROR] CHILD cannot exec KSH" );
    164161                    display_string( string );
    165162                }
     
    168165            {
    169166                // INIT display new KSH process PID
    170                 snprintf( string , 64 , "[INIT] re-created KSH / pid = %x", ret_fork );
     167                snprintf( string , 64 , "[init] re-created KSH / pid = %x", ret_fork );
    171168                display_string( string );
    172169            }
  • trunk/user/ksh/ksh.c

    r619 r623  
    5858#define DEBUG_INTER         0
    5959#define DEBUG_PARSE         0
    60 #define DEBUG_CMD_CAT       0
     60#define DEBUG_CMD_CAT       1
    6161#define DEBUG_CMD_CP        0
    6262#define DEBUG_CMD_LOAD      0
     
    122122        if (argc != 2)
    123123    {
    124         fd   = -1;
    125         buf  = NULL;
    126         size = 0;
    127124                printf("  usage: cat pathname\n");
    128             goto cmd_cat_exit;
     125
     126        sem_post( &semaphore );
     127            return;
    129128    }
    130129
     
    135134    if (fd < 0)
    136135    {
    137         buf  = NULL;
    138         size = 0;
    139136            printf("  error: cannot open file <%s>\n", path);
    140             goto cmd_cat_exit;
     137
     138        sem_post( &semaphore );
     139            return;
    141140    }
    142141
    143142#if DEBUG_CMD_CAT
    144 snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, path );
     143snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, path );
    145144display_string( string );
    146145#endif
     
    149148    if ( stat( path , &st ) == -1)
    150149    {
    151         buf  = NULL;
    152         size = 0;
    153150            printf("  error: cannot stat <%s>\n", path);
    154             goto cmd_cat_exit;
     151
     152            close(fd);
     153        sem_post( &semaphore );
     154            return;
    155155    }
    156156
    157157        if ( S_ISDIR(st.st_mode) )
    158158    {
    159         buf  = NULL;
    160         size = 0;
    161159            printf("  error: <%s> is a directory\n", path);
    162             goto cmd_cat_exit;
     160
     161            close(fd);
     162        sem_post( &semaphore );
     163            return;
    163164    }
    164165
     
    167168
    168169#if DEBUG_CMD_CAT
    169 snprintf( string , 64 , "[KSH] %s : get size = %d", __FUNCTION__, size );
    170 display_string( string );
    171 #endif
    172 
    173     // MAP_FILE is default type when MAP_ANON and MAP_REMOTE are not specified
     170snprintf( string , 64 , "[ksh] %s : size = %d", __FUNCTION__, size );
     171display_string( string );
     172#endif
     173
     174    if( size == 0 )
     175    {
     176            printf("  error: size = 0 for <%s>\n", path);
     177
     178            close(fd);
     179        sem_post( &semaphore );
     180            return;
     181    }
     182
     183    // mapping type is MAP_FILE when MAP_ANON and MAP_REMOTE are not specified
    174184    buf = mmap( NULL , size , PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );
    175185
     
    177187    {
    178188            printf("  error: cannot map file <%s>\n", path );
    179             goto cmd_cat_exit;
     189
     190            close(fd);
     191        sem_post( &semaphore );
     192            return;
    180193    }
    181194
    182195#if DEBUG_CMD_CAT
    183 snprintf( string , 64 , "[KSH] %s : map file %d to buffer %x", __FUNCTION__, fd , buf );
    184 display_string( string );
    185 display_vmm( 0 , getpid() );
     196snprintf( string , 64 , "[ksh] %s : mapped file %d to buffer %x", __FUNCTION__, fd , buf );
     197display_string( string );
     198// unsigned int pid = getpid();
     199// unsigned int cxy = pid >> 16;
     200// display_vmm( cxy , pid );
    186201#endif
    187202
     
    189204    write( 1 , buf , size );
    190205
    191     // release semaphore to get next command
    192     sem_post( &semaphore );
    193 
    194     return;
    195 
    196 cmd_cat_exit:
    197 
    198         if (buf != NULL) munmap(buf, size);
    199         if (fd >= 0) close(fd);
     206    // unmap the file
     207    if( munmap( buf , size ) )
     208    {
     209            printf("  error: cannot unmap file <%s>\n", path );
     210    }
     211
     212#if DEBUG_CMD_CAT
     213snprintf( string , 64 , "[ksh] %s : unmapped file %d from buffer %x", __FUNCTION__, fd , buf );
     214display_string( string );
     215// display_vmm( cxy , pid );
     216#endif
     217
     218    // close the file
     219        if( close( fd ) )
     220    {
     221            printf("  error: cannot close file <%s>\n", path );
     222    }
    200223
    201224    // release semaphore to get next command
     
    267290
    268291#if DEBUG_CMD_CP
    269 snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, srcpath );
     292snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, srcpath );
    270293display_string( string );
    271294#endif
     
    280303
    281304#if DEBUG_CMD_CP
    282 snprintf( string , 64 , "[KSH] %s : got stats for %s", __FUNCTION__, srcpath );
     305snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, srcpath );
    283306display_string( string );
    284307#endif
     
    304327
    305328#if DEBUG_CMD_CP
    306 snprintf( string , 64 , "[KSH] %s : file %s open", __FUNCTION__, dstpath );
     329snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, dstpath );
    307330display_string( string );
    308331#endif
     
    315338
    316339#if DEBUG_CMD_CP
    317 snprintf( string , 64 , "[KSH] %s : got stats for %s", __FUNCTION__, dstpath );
     340snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, dstpath );
    318341display_string( string );
    319342#endif
     
    339362
    340363#if DEBUG_CMD_CP
    341 snprintf( string , 64 , "[KSH] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
     364snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
    342365display_string( string );
    343366#endif
     
    351374
    352375#if DEBUG_CMD_CP
    353 snprintf( string , 64 , "[KSH] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
     376snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
    354377display_string( string );
    355378#endif
     
    381404               "         display  dqdt\n"             
    382405               "         display  locks    pid    trdid\n"
     406               "         display  barrier  pid\n"
    383407               "         display  mapper   path   page_id  nbytes\n");
    384408    }
     
    504528            {
    505529                printf("  error: illegal arguments pid = %x / trdid = %x\n", pid, trdid );
     530            }
     531        }
     532    }
     533    /////////////////////////////////////////////////
     534    else if( strcmp( argv[1] , "barrier" ) == 0 )
     535    {
     536        if( argc != 3 )
     537        {
     538                    printf("  usage: display barrier pid\n");
     539            }
     540        else
     541        {
     542                unsigned int pid   = atoi(argv[2]);
     543
     544            if( display_barrier( pid ) )
     545            {
     546                printf("  error: illegal arguments pid = %x\n", pid );
    506547            }
    507548        }
     
    678719
    679720#if DEBUG_CMD_LOAD
    680 snprintf( string , 64 , "[KSH] %s : ksh_pid %x / path %s / bg %d / place %d (%x)\n",
     721snprintf( string , 64 , "[ksh] %s : ksh_pid %x / path %s / bg %d / place %d (%x)\n",
    681722__FUNCTION__, ksh_pid, argv[1], background, placement, cxy );
    682723display_string( string );
     
    697738
    698739#if DEBUG_CMD_LOAD
    699 snprintf( string , 64 , "[KSH] %s : child_pid %x after fork, before exec\n",
     740snprintf( string , 64 , "[ksh] %s : child_pid %x after fork, before exec\n",
    700741__FUNCTION__ , getpid() );
    701742display_string( string );
     
    706747
    707748#if DEBUG_CMD_LOAD
    708 snprintf( string , 64 , "[KSH] %s : child_pid %x after exec / ret_exec %x\n",
     749snprintf( string , 64 , "[ksh] %s : child_pid %x after exec / ret_exec %x\n",
    709750__FUNCTION__ , getpid(), ret_exec );
    710751display_string( string );
     
    722763
    723764#if DEBUG_CMD_LOAD
    724 snprintf( string , 64 , "[KSH] %s : ksh_pid %x after fork / ret_fork %x\n",
     765snprintf( string , 64 , "[ksh] %s : ksh_pid %x after fork / ret_fork %x\n",
    725766__FUNCTION__, getpid(), ret_fork );
    726767display_string( string );
     
    795836
    796837#if DEBUG_CMD_LS
    797 snprintf( string , 64 , "[KSH] %s : directory <%s> open / DIR %x\n",
     838snprintf( string , 64 , "[ksh] %s : directory <%s> open / DIR %x\n",
    798839__FUNCTION__, pathname , dir );
    799840display_string( string );
     
    803844            {
    804845                    printf("  error : directory <%s> not found\n", pathname );
    805             goto cmd_ls_exit;
     846
     847            sem_post( &semaphore );
     848            return;
    806849            }
    807850
     
    816859
    817860#if DEBUG_CMD_LS
    818 snprintf( string , 64 , "[KSH] %s : directory <%s> closed\n",
     861snprintf( string , 64 , "[ksh] %s : directory <%s> closed\n",
    819862__FUNCTION__, pathname );
    820863display_string( string );
     
    822865
    823866    }
    824 
    825 cmd_ls_exit:
    826867
    827868    // release semaphore to get next command
     
    908949
    909950#if DEBUG_CMD_PS
    910 snprintf( string , 64 , "\n[KSH] %s : call display_cluster_process()", __FUNCTION__ );
     951snprintf( string , 64 , "\n[ksh] %s : call display_cluster_process()", __FUNCTION__ );
    911952display_string( string );
    912953#endif
     
    10711112#if DEBUG_PARSE
    10721113char string[64];
    1073 snprintf( string , 64 , "\n[KSH] %s : <%s>", __FUNCTION__ , buf );
     1114snprintf( string , 64 , "\n[ksh] %s : <%s>", __FUNCTION__ , buf );
    10741115display_string( string );
    10751116#endif
     
    10941135
    10951136#if DEBUG_PARSE
    1096 snprintf( string , 64 , "\n[KSH] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
     1137snprintf( string , 64 , "\n[ksh] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
    10971138display_string( string );
    10981139#endif
     
    11931234#if DEBUG_INTER
    11941235unsigned int pid = getpid();
    1195 snprintf( string , 64 , "\n[KSH] %s : request a new command", __FUNCTION__ );
     1236snprintf( string , 64 , "\n[ksh] %s : request a new command", __FUNCTION__ );
    11961237display_string( string );
    11971238#endif
     
    12301271
    12311272#if DEBUG_INTER
    1232 snprintf( string , 64 , "[KSH] %s : parse and execute <%s>", __FUNCTION__, cmd );
     1273snprintf( string , 64 , "[ksh] %s : parse and execute <%s>", __FUNCTION__, cmd );
    12331274display_string( string );
    12341275#endif
     
    13501391
    13511392#if DEBUG_INTER
    1352 snprintf( string , 64 , "\n[KSH] %s : complete <%s> command", __FUNCTION__, cmd );
     1393snprintf( string , 64 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
    13531394display_string( string );
    13541395#endif
  • trunk/user/sort/sort.c

    r619 r623  
    2929#include <hal_macros.h>
    3030
    31 #define ARRAY_LENGTH        256        // number of values
     31#define ARRAY_LENGTH        1024       // number of items
    3232#define MAX_THREADS         1024       // 16 * 16 * 4
    33 #define USE_DQT_BARRIER     1
    34 #define DISPLAY_ARRAY       0
    35 #define INTERACTIVE_MODE    0
     33
     34#define USE_DQT_BARRIER     1          // use DQT barrier if non zero
     35#define DISPLAY_ARRAY       0          // display items values before and after
     36#define VERBOSE             0          // for debug
     37#define INTERACTIVE_MODE    0          // for debug
     38#define CHECK_RESULT        0          // for debug
     39#define INSTRUMENTATION     1          // register computation times in a file
    3640
    3741/////////////////////////////////////////////////////////////
     
    8488
    8589///////////////////////////////////
    86 static void merge( const int * src,
    87                    int       * dst,
    88                    int         length,
    89                    int         init_pos_src_a,
    90                    int         init_pos_src_b,
    91                    int         init_pos_dst )
     90static void merge( const int * src,               // source array
     91                   int       * dst,               // destination array
     92                   int         length,            // number of items in a subset
     93                   int         init_pos_src_a,    // index first item in src subset A
     94                   int         init_pos_src_b,    // index first item in src subset B
     95                   int         init_pos_dst )     // index first item in destination
    9296{
    9397    int i;
     
    135139    unsigned int       lid;
    136140
    137     int         * src_array  = NULL;
    138     int         * dst_array  = NULL;
     141    int              * src_array  = NULL;
     142    int              * dst_array  = NULL;
    139143
    140144    // get core coordinates and date
     
    146150    unsigned int  main_uid   = ptr->main_uid;
    147151
     152#if DISPLAY_ARRAY
     153unsigned int n;
     154if( thread_uid == main_uid )
     155{
     156    printf("\n*** array before sort\n");
     157    for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
     158}
     159#endif
     160
     161    /////////////////////////////////
     162    pthread_barrier_wait( &barrier );
     163
     164#if VERBOSE
     165printf("\n[sort] thread[%d] exit initial barrier\n", thread_uid );
     166#endif
     167
    148168    unsigned int  items      = ARRAY_LENGTH / threads;
    149169    unsigned int  stages     = __builtin_ctz( threads ) + 1;
    150170
    151     printf("\n[SORT] thread[%d] : start\n", thread_uid );
     171#if VERBOSE
     172printf("\n[sort] thread[%d] : start\n", thread_uid );
     173#endif
    152174
    153175    bubbleSort( array0, items, items * thread_uid );
    154176
    155     printf("\n[SORT] thread[%d] : stage 0 completed\n", thread_uid );
     177#if VERBOSE
     178printf("\n[sort] thread[%d] : stage 0 completed\n", thread_uid );
     179#endif
    156180
    157181    /////////////////////////////////
    158182    pthread_barrier_wait( &barrier );
    159     printf("\n[SORT] thread[%d] exit barrier 0\n", thread_uid );
    160 
    161     // the number of threads contributing to sort
    162     // is divided by 2 at each next stage
     183
     184#if VERBOSE
     185printf("\n[sort] thread[%d] exit barrier 0\n", thread_uid );
     186#endif
     187
     188#if DISPLAY_ARRAY
     189if( thread_uid == main_uid )
     190{
     191    printf("\n*** array after bubble sort\n");
     192    for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
     193}
     194#endif
     195
     196    // the number of threads contributing to sort is divided by 2
     197    // and the number of items is multiplied by 2 at each next stage
    163198    for ( i = 1 ; i < stages ; i++ )
    164199    {
    165         pthread_barrier_wait( &barrier );
     200        if((i % 2) == 1)               // odd stage
     201        {
     202            src_array = array0;
     203            dst_array = array1;
     204        }
     205        else                           // even stage
     206        {
     207            src_array = array1;
     208            dst_array = array0;
     209        }
    166210
    167211        if( (thread_uid & ((1<<i)-1)) == 0 )
    168212        {
    169             printf("\n[SORT] thread[%d] : stage %d start\n", thread_uid , i );
    170 
    171             if((i % 2) == 1)               // odd stage
    172             {
    173                 src_array = array0;
    174                 dst_array = array1;
    175             }
    176             else                           // even stage
    177             {
    178                 src_array = array1;
    179                 dst_array = array0;
    180             }
    181 
     213
     214#if VERBOSE
     215printf("\n[sort] thread[%d] : stage %d start\n", thread_uid , i );
     216#endif
    182217            merge( src_array,
    183218                   dst_array,
    184                    items << i,
     219                   items << (i-1),
    185220                   items * thread_uid,
    186221                   items * (thread_uid + (1 << (i-1))),
    187222                   items * thread_uid );
    188223
    189             printf("\n[SORT] thread[%d] : stage %d completed\n", thread_uid , i );
     224#if VERBOSE
     225printf("\n[sort] thread[%d] : stage %d completed\n", thread_uid , i );
     226#endif
    190227        }
    191228
    192229        /////////////////////////////////
    193230        pthread_barrier_wait( &barrier );
    194         printf("\n[SORT] thread[%d] exit barrier %d\n", thread_uid , i );
    195 
    196     }
     231
     232#if VERBOSE
     233printf("\n[sort] thread[%d] exit barrier %d\n", thread_uid , i );
     234#endif
     235
     236#if DISPLAY_ARRAY
     237if( thread_uid == main_uid )
     238{
     239    printf("\n*** array after merge %d\n", i );
     240    for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , dst_array[n] );
     241}
     242#endif
     243
     244    }  // end for stages
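
To make the staging concrete, here is a worked example (illustrative, assuming 4 threads and ARRAY_LENGTH = 1024, i.e. items = 256):

    //  - stage 0 : threads 0,1,2,3 each bubble-sort a 256-item subset of array0
    //  - stage 1 : threads 0 and 2 ((uid & 1) == 0) each merge two sorted 256-item
    //              subsets of array0 into a 512-item subset of array1
    //  - stage 2 : thread 0 ((uid & 3) == 0) merges the two 512-item subsets of
    //              array1 into the final sorted 1024-item array0
    // At stage i the "length" argument passed to merge() is items << (i-1),
    // and only the threads with (thread_uid & ((1<<i)-1)) == 0 contribute.
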
    197245
    198246    // all threads but the main thread exit
     
    220268    unsigned int           lid;                // core local index for a thread
    221269    unsigned int           n;                  // index in array to sort
    222     unsigned long long     cycle;              // current date for log
    223270    pthread_t              trdid;              // kernel allocated thread index (unused)
    224271    pthread_barrierattr_t  barrier_attr;       // barrier attributes
    225272
     273    unsigned long long     start_cycle;
     274    unsigned long long     seq_end_cycle;
     275    unsigned long long     para_end_cycle;
     276
     277    /////////////////////////
     278    get_cycle( &start_cycle );
     279 
    226280    // compute number of threads (one thread per core)
    227281    get_config( &x_size , &y_size , &ncores );
     
    240294         (total_threads != 512) && (total_threads != 1024) )
    241295    {
    242         printf("\n[SORT ERROR] number of cores must be power of 2\n");
     296        printf("\n[sort error] number of cores must be a power of 2\n");
    243297        exit( 0 );
    244298    }
     
    247301    if ( ARRAY_LENGTH % total_threads)
    248302    {
    249         printf("\n[SORT ERROR] array size must be multiple of number of threads\n");
     303        printf("\n[sort error] array size must be a multiple of the number of threads\n");
    250304        exit( 0 );
    251305    }
    252306
    253     printf("\n\n[SORT] main starts on core[%x,%d] / %d threads / %d values / PID %x\n",
    254     main_cxy, main_lid, total_threads, ARRAY_LENGTH, getpid() );
     307    printf("\n\n[sort] main starts / %d threads / %d items / pid %x / cycle %d\n",
     308    total_threads, ARRAY_LENGTH, getpid(), (unsigned int)start_cycle );
    255309
    256310    // initialize barrier
     
    269323    if( error )
    270324    {
    271         printf("\n[SORT ERROR] cannot initialise barrier\n" );
     325        printf("\n[sort error] cannot initialise barrier\n" );
    272326        exit( 0 );
    273327    }
    274328
    275     printf("\n[SORT] main completes barrier init\n");
     329#if VERBOSE
     330printf("\n[sort] main completes barrier init\n");
     331#endif
    276332
    277333    // Array to sort initialization
     
    281337    }
    282338
    283 #if DISPLAY_ARRAY
    284 printf("\n*** array before sort\n");
    285 for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , array0[n] );
    286 #endif
    287 
    288     printf("\n[SORT] main completes array init\n");
     339#if VERBOSE
     340printf("\n[sort] main completes array init\n");
     341#endif
    289342
    290343    // launch other threads to execute sort() function
     
    317370                                         &arg[thread_uid] ) ) // sort arguments
    318371                    {
    319                         printf("\n[SORT ERROR] main cannot create thread %x \n", thread_uid );
     372                        printf("\n[sort error] main cannot create thread %x \n", thread_uid );
    320373                        exit( 0 );
    321374                    }
    322375                    else
    323376                    {
    324                         printf("\n[SORT] main created thread %x \n", thread_uid );
     377#if VERBOSE
     378printf("\n[sort] main created thread %x \n", thread_uid );
     379#endif
    325380                    }
    326381                }
     
    329384    }
    330385   
    331     get_cycle( &cycle );
    332     printf("\n[SORT] main completes threads create at cycle %d\n", (unsigned int)cycle );
     386    ///////////////////////////
     387    get_cycle( &seq_end_cycle );
     388
     389#if VERBOSE
     390printf("\n[sort] main completes sequential init at cycle %d\n",
     391(unsigned int)seq_end_cycle );
     392#endif
    333393
    334394#if INTERACTIVE_MODE
     
    339399    sort( &arg[main_uid] );
    340400
     401    ////////////////////////////
     402    get_cycle( &para_end_cycle );
     403
     404    printf("\n[sort] main completes parallel sort at cycle %d\n",
     405    (unsigned int)para_end_cycle );
     406
     407    // destroy barrier
     408    pthread_barrier_destroy( &barrier );
     409
    341410#if INTERACTIVE_MODE
    342411idbg();
    343412#endif
    344413
    345     // destroy barrier
    346     pthread_barrier_destroy( &barrier );
    347 
    348 #if INTERACTIVE_MODE
    349 idbg();
    350 #endif
    351 
    352     // Check result
    353     int    success = 1;
    354     int*   res_array = ( (total_threads ==   2) ||
    355                          (total_threads ==   8) ||
    356                          (total_threads ==  32) ||
    357                          (total_threads == 128) ||
    358                          (total_threads == 512) ) ? array1 : array0;
    359    
    360     for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
    361     {
    362         if ( res_array[n] > res_array[n+1] )
    363         {
    364             printf("\n[SORT] array[%d] = %d > array[%d] = %d\n",
    365             n , res_array[n] , n+1 , res_array[n+1] );
    366             success = 0;
    367             break;
    368         }
    369     }
    370 
    371 #if DISPLAY_ARRAY
    372 printf("\n*** array after sort\n");
    373 for( n=0; n<ARRAY_LENGTH; n++) printf("array[%d] = %d\n", n , res_array[n] );
    374 #endif
    375 
    376     get_cycle( &cycle );
    377 
    378     if ( success )
    379     {
    380         printf("\n[SORT] success at cycle %d\n", (unsigned int)cycle );
    381     }
    382     else
    383     {
    384         printf("\n[SORT] failure at cycle %d\n", (unsigned int)cycle );
    385     }
    386 
    387 #if INTERACTIVE_MODE
    388 idbg();
     414#if CHECK_RESULT   
     415int    success = 1;
     416int*   res_array = ( (total_threads ==   2) ||
     417                     (total_threads ==   8) ||
     418                     (total_threads ==  32) ||
     419                     (total_threads == 128) ||
     420                     (total_threads == 512) ) ? array1 : array0;
     421
     422for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
     423{
     424    if ( res_array[n] > res_array[n+1] )
     425    {
     426        printf("\n[sort] array[%d] = %d > array[%d] = %d\n",
     427        n , res_array[n] , n+1 , res_array[n+1] );
     428        success = 0;
     429        break;
     430    }
     431}
     432
     433if ( success ) printf("\n[sort] success\n");
     434else           printf("\n[sort] failure\n");
     435#endif
     436
     437#if INSTRUMENTATION
     438char   name[64];
     439char   path[128];
     440
     441// build a file name from n_items / n_clusters / n_cores
     442if( USE_DQT_BARRIER ) snprintf( name , 64 , "sort_dqt_%d_%d_%d",
     443                      ARRAY_LENGTH, x_size * y_size, ncores );
     444else                  snprintf( name , 64 , "sort_smp_%d_%d_%d",
     445                      ARRAY_LENGTH, x_size * y_size, ncores );
     446
     447// build file pathname
     448snprintf( path , 128 , "home/%s" , name );
     449
     450// compute results
     451unsigned int sequential = (unsigned int)(seq_end_cycle - start_cycle);
     452unsigned int parallel   = (unsigned int)(para_end_cycle - seq_end_cycle);
     453
     454// display results on process terminal
     455printf("\n----- %s -----\n"
     456       " - sequential : %d cycles\n"
     457       " - parallel   : %d cycles\n",
     458       name, sequential, parallel );
     459
     460// open file
     461FILE * stream = fopen( path , NULL );
     462if( stream == NULL )
     463{
     464    printf("\n[sort error] cannot open instrumentation file <%s>\n", name );
     465    exit(0);
     466}
     467
     468// register results to file
     469fprintf( stream , "\n----- %s -----\n"
     470                  " - sequential : %d cycles\n"
     471                  " - parallel   : %d cycles\n",
     472         name, sequential, parallel );
     473
     474// close instrumentation file
     475if( fclose( stream ) )
     476{
     477    printf("\n[sort error] cannot close instrumentation file <%s>\n", name );
     478    exit(0);
     479}
    389480#endif
    390481
     
    392483
    393484}  // end main()
    394 
    395485
    396486/*
Note: See TracChangeset for help on using the changeset viewer.