/*
 * vfs.c - Virtual File System implementation.
 *
 * Author Mohamed Lamine Karaoui (2015)
 *        Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// NOTE(review): the <...> header names of the following includes were lost
// when this file was extracted — restore the original targets before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

//////////////////////////////////////////////////////////////////////////////////////////
//           Extern variables
//////////////////////////////////////////////////////////////////////////////////////////

extern vfs_ctx_t          fs_context[FS_TYPES_NR];   // allocated in kernel_init.c
extern chdev_directory_t  chdev_dir;                 // allocated in kernel_init.c

//////////////////////////////////////////////////////////////////////////////////////////
//           Context related functions
//////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////////////////
// This function initialises the VFS context <type> in the local fs_context[] array:
// geometry (total_clusters / cluster_size), root inode, FS-specific extension,
// the lock protecting the inum allocator, and the inum allocator bitmap itself.
////////////////////////////////////////
void vfs_ctx_init( vfs_fs_type_t type,
                   uint32_t      attr,
                   uint32_t      total_clusters,
                   uint32_t      cluster_size,
                   xptr_t        vfs_root_xp,
                   void        * extend )
{
    vfs_ctx_t * vfs_ctx = &fs_context[type];

    vfs_ctx->type           = type;
    vfs_ctx->attr           = attr;
    vfs_ctx->total_clusters = total_clusters;
    vfs_ctx->cluster_size   = cluster_size;
    vfs_ctx->vfs_root_xp    = vfs_root_xp;
    vfs_ctx->extend         = extend;

    // the lock protects the inum allocator bitmap below
    spinlock_init( &vfs_ctx->lock );

    bitmap_init( vfs_ctx->bitmap , BITMAP_SIZE(CONFIG_VFS_MAX_INODES) );
}

//////////////////////////////////////////////////////////////////////////////////////////
// This function allocates a new inode identifier from the local cluster allocator.
// The returned inum encodes the owner cluster in the 16 MSB bits and the local
// bitmap index in the 16 LSB bits.
// @ ctx   : local pointer on the VFS context.
// @ inum  : [out] allocated inode identifier.
// @ return 0 if success / return 1 if no more free slot.
////////////////////////////////////////////
error_t vfs_ctx_inum_alloc( vfs_ctx_t * ctx,
                            uint32_t  * inum )
{
    // get lock on inum allocator
    spinlock_lock( &ctx->lock );

    // get lid from local inum allocator
    uint32_t lid = bitmap_ffc( ctx->bitmap , CONFIG_VFS_MAX_INODES );

    if( lid == 0xFFFFFFFF )   // no more free slot => error
    {
        // release lock
        spinlock_unlock( &ctx->lock );

        // return error
        return 1;
    }
    else                      // found => return inum
    {
        // set slot allocated
        bitmap_set( ctx->bitmap , lid );

        // release lock
        spinlock_unlock( &ctx->lock );

        // return inum : owner cluster in MSB half / local index in LSB half
        *inum = (((uint32_t)local_cxy) << 16) | (lid & 0xFFFF);
        return 0;
    }
}

//////////////////////////////////////////////////////////////////////////////////////////
// This function releases the local bitmap slot encoded in the 16 LSB bits of <inum>.
// NOTE(review): the bitmap is written without taking ctx->lock here — presumably
// bitmap_clear is atomic on this target; confirm against the bitmap implementation.
////////////////////////////////////////////
void vfs_ctx_inum_release( vfs_ctx_t * ctx,
                           uint32_t    inum )
{
    bitmap_clear( ctx->bitmap , inum & 0xFFFF );
}

//////////////////////////////////////////////////////////////////////////////////////////
//           Inode related functions
//////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////////////////
// This function returns a printable 4-character string for an inode type,
// or "undefined" for an unknown value.
//////////////////////////////////////////////////////////////////////////////////////////
char * vfs_inode_type_str( uint32_t type )
{
    if     ( type == INODE_TYPE_FILE ) return "FILE";
    else if( type == INODE_TYPE_DIR  ) return "DIR ";
    else if( type == INODE_TYPE_FIFO ) return "FIFO";
    else if( type == INODE_TYPE_PIPE ) return "PIPE";
    else if( type == INODE_TYPE_SOCK ) return "SOCK";
    else if( type == INODE_TYPE_DEV  ) return "DEV ";
    else if( type == INODE_TYPE_SYML ) return "SYML";
    else                               return "undefined";
}

//////////////////////////////////////////////////////////////////////////////////////////
// This function allocates an inum, a mapper, and an inode descriptor in the local
// cluster, initialises them, and returns an extended pointer on the new inode.
// On failure every resource already allocated is released before returning.
// @ dentry_xp  : extended pointer on parent dentry (XPTR_NULL for the VFS root).
// @ fs_type    : file system type (FATFS / RAMFS / DEVFS).
// @ inode_type : inode type (file, directory, device, ...).
// @ extend     : FS-specific extension.
// @ attr / rights / uid / gid : access-control attributes.
// @ inode_xp   : [out] extended pointer on the created inode.
// @ return 0 if success / return ENOMEM if any allocation failed.
//////////////////////////////////////////////////////
error_t vfs_inode_create( xptr_t            dentry_xp,
                          vfs_fs_type_t     fs_type,
                          vfs_inode_type_t  inode_type,
                          void            * extend,
                          uint32_t          attr,
                          uint32_t          rights,
                          uid_t             uid,
                          gid_t             gid,
                          xptr_t          * inode_xp )
{
    mapper_t         * mapper;     // associated mapper( to be allocated)
    vfs_inode_t      * inode;      // inode descriptor (to be allocated)
    uint32_t           inum;       // inode identifier (to be allocated)
    vfs_ctx_t        * ctx;        // file system context
    kmem_req_t         req;        // request to kernel memory allocator
    error_t            error;

#if DEBUG_VFS_INODE_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_INODE_CREATE < cycle )
printk("\n[DBG] %s : thread %x enter / dentry = %x in cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, GET_PTR(dentry_xp), GET_CXY(dentry_xp), cycle );
#endif

    // check fs type and get pointer on context
    if     ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
    else if( fs_type == FS_TYPE_RAMFS ) ctx = &fs_context[FS_TYPE_RAMFS];
    else if( fs_type == FS_TYPE_DEVFS ) ctx = &fs_context[FS_TYPE_DEVFS];
    else
    {
        ctx = NULL;
        assert( false , "illegal file system type = %d\n" , fs_type );
    }

    // allocate inum
    error = vfs_ctx_inum_alloc( ctx , &inum );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot allocate inum\n", __FUNCTION__ );
        return ENOMEM;
    }

    // allocate memory for mapper
    mapper = mapper_create( fs_type );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate mapper\n", __FUNCTION__ );
        // undo inum allocation before returning
        vfs_ctx_inum_release( ctx , inum );
        return ENOMEM;
    }

    // allocate memory for VFS inode descriptor
    req.type  = KMEM_VFS_INODE;
    req.size  = sizeof(vfs_inode_t);
    req.flags = AF_KERNEL | AF_ZERO;
    inode     = (vfs_inode_t *)kmem_alloc( &req );

    if( inode == NULL )
    {
        printk("\n[ERROR] in %s : cannot allocate inode descriptor\n", __FUNCTION__ );
        // undo inum and mapper allocations before returning
        vfs_ctx_inum_release( ctx , inum );
        mapper_destroy( mapper );
        return ENOMEM;
    }

    // initialize inode descriptor
    inode->gc        = 0;
    inode->type      = inode_type;
    inode->inum      = inum;
    inode->attr      = attr;
    inode->rights    = rights;
    inode->uid       = uid;
    inode->gid       = gid;
    inode->refcount  = 0;
    inode->parent_xp = dentry_xp;
    inode->ctx       = ctx;
    inode->mapper    = mapper;
    inode->extend    = extend;

    // initialise inode field in mapper (back-pointer)
    mapper->inode = inode;

    // initialise threads waiting queue
    xlist_root_init( XPTR( local_cxy , &inode->wait_root ) );

    // initialize dentries hash table
    xhtab_init( &inode->children , XHTAB_DENTRY_TYPE );

    // initialize inode locks
    remote_rwlock_init( XPTR( local_cxy , &inode->data_lock ) );
    remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );

#if DEBUG_VFS_INODE_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_INODE_CREATE < cycle )
printk("\n[DBG] %s : thread %x exit / inode = %x in cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
#endif

    // return extended pointer on inode
    *inode_xp = XPTR( local_cxy , inode );
    return 0;

}  // end vfs_inode_create()

//////////////////////////////////////////////////////////////////////////////////////////
// This function releases the mapper and the inode descriptor memory.
// The inode refcount must be zero.
// NOTE(review): the inum is not returned to the context allocator here — TODO confirm
// whether the caller is expected to call vfs_ctx_inum_release() itself.
////////////////////////////////////////////////
error_t vfs_inode_destroy( vfs_inode_t * inode )
{
    assert( (inode->refcount == 0) , "inode refcount non zero\n" );

    // release memory allocated for mapper
    mapper_destroy( inode->mapper );

    // release memory allocate for inode descriptor
    kmem_req_t req;
    req.ptr   = inode;
    req.type  = KMEM_VFS_INODE;
    kmem_free( &req );

    return 0;

}  // end vfs_inode_destroy()

//////////////////////////////////////////////////////////////////////////////////////////
// This function loads a named child inode from the parent's file system.
// Only FATFS is supported: RAMFS / DEVFS inodes are never loaded from a device.
// @ parent   : local pointer on parent inode.
// @ name     : child entry name in parent directory.
// @ child_xp : extended pointer on child inode to initialise.
// @ return 0 if success / return the FS-specific error otherwise.
/////////////////////////////////////////////
error_t vfs_inode_load( vfs_inode_t * parent,
                        char        * name,
                        xptr_t        child_xp )
{

#if DEBUG_VFS_INODE_LOAD
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_INODE_LOAD < cycle )
printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
__FUNCTION__, CURRENT_THREAD , name , cycle );
#endif

    error_t error = 0;

    assert( (parent   != NULL)      , "parent pointer is NULL\n");
    assert( (child_xp != XPTR_NULL) , "child pointer is NULL\n");

    // get parent inode FS type
    vfs_fs_type_t fs_type = parent->ctx->type;

    // call relevant FS function
    if( fs_type == FS_TYPE_FATFS )
    {
        error = fatfs_inode_load( parent , name , child_xp );
    }
    else if( fs_type == FS_TYPE_RAMFS )
    {
        assert( false , "should not be called for RAMFS\n" );
    }
    else if( fs_type == FS_TYPE_DEVFS )
    {
        assert( false , "should not be called for DEVFS\n" );
    }
    else
    {
        assert( false , "undefined file system type\n" );
    }

#if DEBUG_VFS_INODE_LOAD
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_INODE_LOAD < cycle )
printk("\n[DBG] %s : thread %x exit for <%s> / cycle %d\n",
__FUNCTION__, CURRENT_THREAD , name , cycle );
#endif

    return error;

}  // end vfs_inode_load()
//////////////////////////////////////////// void vfs_inode_remote_up( xptr_t inode_xp ) { // get inode cluster and local pointer cxy_t inode_cxy = GET_CXY( inode_xp ); vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->refcount ) , 1 ); } ////////////////////////////////////////////// void vfs_inode_remote_down( xptr_t inode_xp ) { // get inode cluster and local pointer cxy_t inode_cxy = GET_CXY( inode_xp ); vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); hal_remote_atomic_add( XPTR( inode_cxy , &inode_ptr->refcount ) , -1 ); } ////////////////////////////////////////////// uint32_t vfs_inode_get_size( xptr_t inode_xp ) { // get inode cluster and local pointer cxy_t cxy = GET_CXY( inode_xp ); vfs_inode_t * ptr = GET_PTR( inode_xp ); // get size remote_rwlock_rd_lock( XPTR( cxy , &ptr->data_lock ) ); uint32_t size = hal_remote_lw( XPTR( cxy , &ptr->size ) ); remote_rwlock_rd_unlock( XPTR( cxy , &ptr->data_lock ) ); return size; } //////////////////////////////////////////// void vfs_inode_set_size( xptr_t inode_xp, uint32_t size ) { // get inode cluster and local pointer cxy_t cxy = GET_CXY( inode_xp ); vfs_inode_t * ptr = GET_PTR( inode_xp ); // set size remote_rwlock_wr_unlock( XPTR( cxy , &ptr->data_lock ) ); hal_remote_sw( XPTR( cxy , &ptr->size ) , size ); remote_rwlock_wr_unlock( XPTR( cxy , &ptr->data_lock ) ); } //////////////////////////////////////// void vfs_inode_unlock( xptr_t inode_xp ) { // get inode cluster and local pointer cxy_t cxy = GET_CXY( inode_xp ); vfs_inode_t * ptr = GET_PTR( inode_xp ); // release the main lock remote_spinlock_unlock( XPTR( cxy , &ptr->main_lock ) ); } ////////////////////////////////////// void vfs_inode_lock( xptr_t inode_xp ) { // get inode cluster and local pointer cxy_t cxy = GET_CXY( inode_xp ); vfs_inode_t * ptr = GET_PTR( inode_xp ); // get the main lock remote_spinlock_lock( XPTR( cxy , &ptr->main_lock ) ); } ///////////////////////////////////////// void 
vfs_inode_get_name( xptr_t inode_xp, char * name ) { cxy_t inode_cxy; vfs_inode_t * inode_ptr; xptr_t dentry_xp; cxy_t dentry_cxy; vfs_dentry_t * dentry_ptr; // get inode cluster and local pointer inode_cxy = GET_CXY( inode_xp ); inode_ptr = GET_PTR( inode_xp ); // get parent dentry dentry_xp = hal_remote_lwd( XPTR( inode_cxy , &inode_ptr->parent_xp ) ); // get local copy of name if( dentry_xp == XPTR_NULL ) // it is the VFS root { strcpy( name , "/" ); } else // not the VFS root { dentry_cxy = GET_CXY( dentry_xp ); dentry_ptr = GET_PTR( dentry_xp ); hal_remote_strcpy( XPTR( local_cxy , name ) , XPTR( dentry_cxy , &dentry_ptr->name ) ); } } // end vfs_inode_get_name() //////////////////////////////////////////////////////////////////////////////////////////// // Dentry related functions ////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////// error_t vfs_dentry_create( vfs_fs_type_t fs_type, char * name, vfs_inode_t * parent, xptr_t * dentry_xp ) { vfs_ctx_t * ctx; // context descriptor vfs_dentry_t * dentry; // dentry descriptor (to be allocated) kmem_req_t req; // request to kernel memory allocator error_t error; #if DEBUG_VFS_DENTRY_CREATE uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_DENTRY_CREATE < cycle ) printk("\n[DBG] %s : thread %x in process %x enter for <%s> / parent_inode %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, name, parent, cycle ); #endif // get pointer on context if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS]; else if( fs_type == FS_TYPE_RAMFS ) ctx = &fs_context[FS_TYPE_RAMFS]; else if( fs_type == FS_TYPE_DEVFS ) ctx = &fs_context[FS_TYPE_DEVFS]; else { ctx = NULL; return EINVAL; } // get name length uint32_t length = strlen( name ); if( length >= CONFIG_VFS_MAX_NAME_LENGTH ) return EINVAL; // allocate memory for dentry descriptor req.type = KMEM_VFS_DENTRY; req.size = 
sizeof(vfs_dentry_t); req.flags = AF_KERNEL | AF_ZERO; dentry = (vfs_dentry_t *)kmem_alloc( &req ); if( dentry == NULL ) return ENOMEM; // initialize dentry descriptor dentry->ctx = ctx; dentry->length = length; dentry->parent = parent; strcpy( dentry->name , name ); #if( DEBUG_VFS_DENTRY_CREATE & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_DENTRY_CREATE < cycle ) printk("\n[DBG] %s : dentry initialised\n", __FUNCTION__ ); #endif // register dentry in hash table rooted in parent inode error = xhtab_insert( XPTR( local_cxy , &parent->children ), name, XPTR( local_cxy , &dentry->list ) ); if( error ) return EINVAL; #if( DEBUG_VFS_DENTRY_CREATE & 1 ) cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_DENTRY_CREATE < cycle ) printk("\n[DBG] %s : dentry registerd in htab\n", __FUNCTION__ ); #endif // return extended pointer on dentry *dentry_xp = XPTR( local_cxy , dentry ); #if DEBUG_VFS_DENTRY_CREATE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_DENTRY_CREATE < cycle ) printk("\n[DBG] %s : thread %x in process %x exit for <%s> / dentry %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, name, dentry, cycle ); #endif return 0; } // end vfs_dentry_create() /////////////////////////////////////////////////// error_t vfs_dentry_destroy( vfs_dentry_t * dentry ) { error_t error; assert( (dentry->refcount == 0) , __FUNCTION__ , "dentry refcount non zero\n" ); // get pointer on parent inode vfs_inode_t * parent = dentry->parent; // remove this dentry from parent inode htab error = xhtab_remove( XPTR( local_cxy , &parent->children ), dentry->name, XPTR( local_cxy , &dentry->list ) ); if( error ) return EINVAL; // release memory allocated to dentry kmem_req_t req; req.ptr = dentry; req.type = KMEM_VFS_DENTRY; kmem_free( &req ); return 0; } ////////////////////////////////////////////////////////////////////////////////////////// // File descriptor related functions 
////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////// error_t vfs_file_create( vfs_inode_t * inode, uint32_t attr, xptr_t * file_xp ) { vfs_file_t * file; kmem_req_t req; // allocate memory for new file descriptor req.type = KMEM_VFS_FILE; req.size = sizeof(vfs_file_t); req.flags = AF_KERNEL | AF_ZERO; file = (vfs_file_t *)kmem_alloc( &req ); if( file == NULL ) return ENOMEM; // initializes new file descriptor file->gc = 0; file->type = inode->type; file->attr = attr; file->offset = 0; file->refcount = 1; file->inode = inode; file->ctx = inode->ctx; file->mapper = inode->mapper; remote_rwlock_init( XPTR( local_cxy , &file->lock ) ); *file_xp = XPTR( local_cxy , file ); #if DEBUG_VFS_OPEN uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_OPEN < cycle ) printk("\n[DBG] %s : thread %x in process %x created file %x in cluster %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, file, local_cxy, cycle ); #endif return 0; } // end vfs_file_create() /////////////////////////////////////////// void vfs_file_destroy( vfs_file_t * file ) { if( file->refcount ) { assert( false , "refcount non zero\n" ); } kmem_req_t req; req.ptr = file; req.type = KMEM_VFS_FILE; kmem_free( &req ); #if DEBUG_VFS_CLOSE uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_CLOSE < cycle ) printk("\n[DBG] %s : thread %x in process %x deleted file %x in cluster %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, file, local_cxy, cycle ); #endif } // end vfs_file_destroy() //////////////////////////////////////// void vfs_file_count_up( xptr_t file_xp ) { // get file cluster and local pointer cxy_t file_cxy = GET_CXY( file_xp ); vfs_file_t * file_ptr = GET_PTR( file_xp ); // atomically increment count hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->refcount ) , 1 ); } ////////////////////////////////////////// void vfs_file_count_down( 
xptr_t file_xp ) { // get file cluster and local pointer cxy_t file_cxy = GET_CXY( file_xp ); vfs_file_t * file_ptr = GET_PTR( file_xp ); // atomically decrement count hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->refcount ) , -1 ); } ////////////////////////////////////////////////////////////////////////////////////////// // File access related functions ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////// error_t vfs_open( process_t * process, char * path, uint32_t flags, uint32_t mode, xptr_t * new_file_xp, uint32_t * new_file_id ) { error_t error; xptr_t inode_xp; // extended pointer on target inode cxy_t inode_cxy; // inode cluster identifier vfs_inode_t * inode_ptr; // inode local pointer uint32_t file_attr; // file descriptor attributes uint32_t lookup_mode; // lookup working mode xptr_t file_xp; // extended pointer on created file descriptor uint32_t file_id; // created file descriptor index in reference fd_array assert( (mode == 0), __FUNCTION__, "the mode parameter is not supported yet\n" ); #if DEBUG_VFS_OPEN uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_OPEN < cycle ) printk("\n[DBG] %s : thread %x in process %x enter for <%s> / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, path, cycle ); #endif // compute lookup working mode lookup_mode = VFS_LOOKUP_OPEN; if( (flags & O_DIR ) ) lookup_mode |= VFS_LOOKUP_DIR; if( (flags & O_CREAT ) ) lookup_mode |= VFS_LOOKUP_CREATE; if( (flags & O_EXCL ) ) lookup_mode |= VFS_LOOKUP_EXCL; // compute attributes for the created file file_attr = 0; if( (flags & O_RDONLY ) == 0 ) file_attr |= FD_ATTR_WRITE_ENABLE; if( (flags & O_WRONLY ) == 0 ) file_attr |= FD_ATTR_READ_ENABLE; if( (flags & O_SYNC ) ) file_attr |= FD_ATTR_SYNC; if( (flags & O_APPEND ) ) file_attr |= FD_ATTR_APPEND; if( (flags & O_CLOEXEC) ) file_attr |= FD_ATTR_CLOSE_EXEC; // get extended pointer on target inode error = 
vfs_lookup( process->vfs_cwd_xp , path , lookup_mode , &inode_xp ); if( error ) return error; // get target inode cluster and local pointer inode_cxy = GET_CXY( inode_xp ); inode_ptr = GET_PTR( inode_xp ); // create a new file descriptor in cluster containing inode if( inode_cxy == local_cxy ) // target cluster is local { error = vfs_file_create( inode_ptr , file_attr , &file_xp ); } else // target cluster is remote { rpc_vfs_file_create_client( inode_cxy , inode_ptr , file_attr , &file_xp , &error ); } if( error ) return error; // allocate and register a new file descriptor index in reference process error = process_fd_register( process , file_xp , &file_id ); if( error ) return error; #if DEBUG_VFS_OPEN cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_OPEN < cycle ) printk("\n[DBG] %s : thread %x in process %x exit for <%s> / fdid %d / cluster %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, path, file_id, GET_CXY( file_xp ), cycle ); #endif // success *new_file_xp = file_xp; *new_file_id = file_id; return 0; } // end vfs_open() ////////////////////////////////////// int vfs_user_move( bool_t to_buffer, xptr_t file_xp, void * buffer, uint32_t size ) { assert( ( file_xp != XPTR_NULL ) , "file_xp == XPTR_NULL" ); cxy_t file_cxy; // remote file descriptor cluster vfs_file_t * file_ptr; // remote file descriptor local pointer vfs_inode_type_t inode_type; uint32_t file_offset; // current offset in file mapper_t * mapper; error_t error; // get cluster and local pointer on remote file descriptor file_cxy = GET_CXY( file_xp ); file_ptr = GET_PTR( file_xp ); // get inode type from remote file descriptor inode_type = hal_remote_lw( XPTR( file_cxy , &file_ptr->type ) ); assert( (inode_type == INODE_TYPE_FILE) , "inode type is not INODE_TYPE_FILE" ); // get mapper pointer and file offset from file descriptor file_offset = hal_remote_lw( XPTR( file_cxy , &file_ptr->offset ) ); mapper = (mapper_t *)hal_remote_lpt( XPTR( file_cxy , 
&file_ptr->mapper ) ); // move data between mapper and buffer if( file_cxy == local_cxy ) { error = mapper_move_user( mapper, to_buffer, file_offset, buffer, size ); } else { rpc_mapper_move_buffer_client( file_cxy, mapper, to_buffer, true, // user buffer file_offset, (uint64_t)(intptr_t)buffer, size, &error ); } if( error ) return -1; else return size; } // end vfs_user_move() //////////////////////////////////////////// error_t vfs_kernel_move( bool_t to_buffer, xptr_t file_xp, xptr_t buffer_xp, uint32_t size ) { assert( ( file_xp != XPTR_NULL ) , "file_xp == XPTR_NULL" ); cxy_t file_cxy; // remote file descriptor cluster vfs_file_t * file_ptr; // remote file descriptor local pointer vfs_inode_type_t inode_type; uint32_t file_offset; // current offset in file mapper_t * mapper; error_t error; // get cluster and local pointer on remote file descriptor file_cxy = GET_CXY( file_xp ); file_ptr = GET_PTR( file_xp ); // get inode type from remote file descriptor inode_type = hal_remote_lw( XPTR( file_cxy , &file_ptr->type ) ); // action depends on inode type if( inode_type == INODE_TYPE_FILE ) { // get mapper pointer and file offset from file descriptor file_offset = hal_remote_lw( XPTR( file_cxy , &file_ptr->offset ) ); mapper = (mapper_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) ); // move data between mapper and buffer if( file_cxy == local_cxy ) { error = mapper_move_kernel( mapper, to_buffer, file_offset, buffer_xp, size ); } else { rpc_mapper_move_buffer_client( file_cxy, mapper, to_buffer, false, // kernel buffer file_offset, buffer_xp, size, &error ); } if( error ) return -1; else return 0; } else { printk("\n[ERROR] in %s : inode is not a file", __FUNCTION__ ); return -1; } } // end vfs_kernel_move() ////////////////////////////////////// error_t vfs_lseek( xptr_t file_xp, uint32_t offset, uint32_t whence, uint32_t * new_offset ) { xptr_t offset_xp; xptr_t lock_xp; cxy_t file_cxy; vfs_file_t * file_ptr; vfs_inode_t * inode_ptr; uint32_t new; 
assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); // get cluster and local pointer on remote file descriptor file_cxy = GET_CXY( file_xp ); file_ptr = GET_PTR( file_xp ); // build extended pointers on lock and offset offset_xp = XPTR( file_cxy , &file_ptr->offset ); lock_xp = XPTR( file_cxy , &file_ptr->lock ); // take file descriptor lock remote_rwlock_wr_lock( lock_xp ); if ( whence == SEEK_CUR ) // new = current + offset { new = hal_remote_lw( offset_xp ) + offset; } else if ( whence == SEEK_SET ) // new = offset { new = offset; } else if ( whence == SEEK_END ) // new = size + offset { // get local pointer on remote inode inode_ptr = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); new = hal_remote_lw( XPTR( file_cxy , &inode_ptr->size ) ) + offset; } else { printk("\n[ERROR] in %s : illegal whence value\n", __FUNCTION__ ); remote_rwlock_wr_unlock( lock_xp ); return -1; } // set new offset hal_remote_sw( offset_xp , new ); // release file descriptor lock remote_rwlock_wr_unlock( lock_xp ); // success if ( new_offset != NULL ) *new_offset = new; return 0; } // vfs_lseek() /////////////////////////////////// error_t vfs_close( xptr_t file_xp, uint32_t file_id ) { cluster_t * cluster; // local pointer on local cluster cxy_t file_cxy; // cluster containing the file descriptor. 
vfs_file_t * file_ptr; // local ponter on file descriptor cxy_t owner_cxy; // process owner cluster lpid_t lpid; // process local index xptr_t root_xp; // root of list of process copies xptr_t lock_xp; // lock protecting the list of copies xptr_t iter_xp; // iterator on list of process copies xptr_t process_xp; // extended pointer on one process copy cxy_t process_cxy; // process copy cluster process_t * process_ptr; // process copy local pointer assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); assert( (file_id < CONFIG_PROCESS_FILE_MAX_NR) , "illegal file_id" ); thread_t * this = CURRENT_THREAD; process_t * process = this->process; #if DEBUG_VFS_CLOSE uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_CLOSE < cycle ) printk("\n[DBG] %s : thread %x in process %x enter / fdid %d / cycle %d\n", __FUNCTION__, this->trdid, process->pid, file_id, cycle ); #endif // get local pointer on local cluster manager cluster = LOCAL_CLUSTER; // get owner process cluster and lpid owner_cxy = CXY_FROM_PID( process->pid ); lpid = LPID_FROM_PID( process->pid ); // get extended pointers on copies root and lock root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] ); lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ); // 1) loop on the process descriptor copies to reset all fd_array[file_id] entries // take the lock protecting the list of copies remote_spinlock_lock( lock_xp ); XLIST_FOREACH( root_xp , iter_xp ) { process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); process_cxy = GET_CXY( process_xp ); process_ptr = GET_PTR( process_xp ); #if (DEBUG_VFS_CLOSE & 1 ) if( DEBUG_VFS_CLOSE < cycle ) printk("\n[DBG] %s : reset fd_array[%d] for process %x in cluster %x\n", __FUNCTION__, file_id, process_ptr, process_cxy ); #endif // fd_array lock is required for atomic write of a 64 bits word // xptr_t fd_array_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock ); xptr_t entry_xp = XPTR( process_cxy , 
&process_ptr->fd_array.array[file_id] ); // remote_rwlock_wr_lock( fd_array_lock_xp ); hal_remote_swd( entry_xp , XPTR_NULL ); // remote_rwlock_wr_unlock( fd_array_lock_xp ); vfs_file_count_down( file_xp ); hal_fence(); } // release the lock protecting the list of copies remote_spinlock_unlock( lock_xp ); #if (DEBUG_VFS_CLOSE & 1) if( DEBUG_VFS_CLOSE < cycle ) printk("\n[DBG] %s : thread %x in process %x reset all fd-array copies\n", __FUNCTION__, this->trdid, process->pid ); #endif // 2) release memory allocated to file descriptor in remote cluster // get cluster and local pointer on remote file descriptor file_cxy = GET_CXY( file_xp ); file_ptr = GET_PTR( file_xp ); if( file_cxy == local_cxy ) // file cluster is local { vfs_file_destroy( file_ptr ); } else // file cluster is local { rpc_vfs_file_destroy_client( file_cxy , file_ptr ); } #if DEBUG_VFS_CLOSE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_CLOSE < cycle ) printk("\n[DBG] %s : thread %x in process %x exit / fdid %d closed / cycle %d\n", __FUNCTION__, this->trdid, process->pid, file_id, cycle ); #endif return 0; } // end vfs_close() //////////////////////////////////// error_t vfs_unlink( xptr_t cwd_xp, char * path ) { assert( false , "not implemented cwd_xp %x, path <%s> \n", cwd_xp, path ); return 0; } //////////////////////////////////////// error_t vfs_stat( xptr_t file_xp, struct stat * k_stat ) { assert( false , "not implemented file_xp: %x, k_stat ptr %x\n", file_xp, k_stat ); return 0; } ///////////////////////////////////////////// error_t vfs_readdir( xptr_t file_xp, struct dirent * k_dirent ) { assert( false , "not implemented file_xp: %x, k_dirent ptr %x\n", file_xp, k_dirent ); return 0; } ////////////////////////////////////// error_t vfs_mkdir( xptr_t file_xp, char * path, uint32_t mode ) { assert( false , "not implemented file_xp: %x, path <%s>, mode: %x\n", file_xp, path, mode ); return 0; } //////////////////////////////////// error_t vfs_rmdir( xptr_t file_xp, char * path ) { 
assert( false , "not implemented file_xp: %x, path <%s>\n", file_xp, path ); return 0; } /////////////////////////////////// error_t vfs_chdir( xptr_t cwd_xp, char * path ) { error_t error; xptr_t inode_xp; // extended pointer on target inode cxy_t inode_cxy; // target inode cluster identifier vfs_inode_t * inode_ptr; // target inode local pointer uint32_t mode; // lookup working mode vfs_inode_type_t inode_type; // target inode type // set lookup working mode mode = 0; // get extended pointer on target inode error = vfs_lookup( cwd_xp , path , mode , &inode_xp ); if( error ) return error; // get inode cluster and local pointer inode_cxy = GET_CXY( inode_xp ); inode_ptr = GET_PTR( inode_xp ); // get inode type from remote file inode_type = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->type ) ); if( inode_type != INODE_TYPE_DIR ) { CURRENT_THREAD->errno = ENOTDIR; return -1; } assert( false , "not implemented\n" ); return 0; } /////////////////////////////////// error_t vfs_chmod( xptr_t cwd_xp, char * path, uint32_t rights ) { error_t error; xptr_t inode_xp; // extended pointer on target inode cxy_t inode_cxy; // inode cluster identifier vfs_inode_t * inode_ptr; // inode local pointer vfs_inode_type_t inode_type; // target inode type // set lookup working mode assert( (rights == 0), __FUNCTION__, "access rights non implemented yet\n" ); // get extended pointer on target inode error = vfs_lookup( cwd_xp , path , 0 , &inode_xp ); if( error ) return error; // get inode cluster and local pointer inode_cxy = GET_CXY( inode_xp ); inode_ptr = GET_PTR( inode_xp ); // get inode type from remote inode inode_type = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->type ) ); assert( false , "not implemented\n" ); return 0; } /////////////////////////////////// error_t vfs_mkfifo( xptr_t cwd_xp, char * path, uint32_t rights ) { assert( false , "not implemented cwd_xp: %x, path <%s>, rights %x\n", cwd_xp, path, rights ); return 0; } 
////////////////////////////////////////////////////////////////////////////////////////// // Inode Tree functions ////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// cxy_t vfs_cluster_random_select( void ) { uint32_t x_size = LOCAL_CLUSTER->x_size; uint32_t y_size = LOCAL_CLUSTER->y_size; uint32_t y_width = LOCAL_CLUSTER->y_width; uint32_t index = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size); uint32_t x = index / y_size; uint32_t y = index % y_size; return (x<type ) ); // get local pointer on associated mapper mapper_t * mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); // make a local copy of node name hal_remote_strcpy( XPTR( local_cxy , name ) , name_xp ); // display inode nolock_printk("%s%s <%s> : inode = %x / mapper = %x / cluster %x\n", indent_str[indent], vfs_inode_type_str( inode_type ), name, inode_ptr , mapper_ptr , inode_cxy ); // scan directory entries if( inode_type == INODE_TYPE_DIR ) { // get extended pointer on directory entries xhtab children_xp = XPTR( inode_cxy , &inode_ptr->children ); // get xhtab lock xhtab_read_lock( children_xp ); // get first dentry from xhtab child_dentry_xp = xhtab_get_first( children_xp ); while( child_dentry_xp != XPTR_NULL ) { // get dentry cluster and local pointer child_dentry_cxy = GET_CXY( child_dentry_xp ); child_dentry_ptr = GET_PTR( child_dentry_xp ); // get extended pointer on child inode child_inode_xp = hal_remote_lwd( XPTR( child_dentry_cxy, &child_dentry_ptr->child_xp ) ); // get extended pointer on dentry name child_dentry_name_xp = XPTR( child_dentry_cxy , &child_dentry_ptr->name ); // recursive call on child inode vfs_recursive_display( child_inode_xp, child_dentry_name_xp, indent+1 ); // get next dentry child_dentry_xp = xhtab_get_next( children_xp ); } // release xhtab lock xhtab_read_unlock( children_xp ); } } // end vfs_recursive_display() /////////////////////////////////// void vfs_display( 
xptr_t inode_xp ) { xptr_t name_xp; xptr_t dentry_xp; cxy_t dentry_cxy; vfs_dentry_t * dentry_ptr; uint32_t save_sr; // get target inode cluster and local pointer cxy_t inode_cxy = GET_CXY( inode_xp ); vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); // get extended pointer on associated dentry dentry_xp = hal_remote_lwd( XPTR( inode_cxy , &inode_ptr->parent_xp ) ); // check if target inode is the File System root if( dentry_xp == XPTR_NULL ) { // build extended pointer on root name name_xp = XPTR( local_cxy , "/" ); } else { // get dentry cluster and local pointer dentry_cxy = GET_CXY( dentry_xp ); dentry_ptr = GET_PTR( dentry_xp ); // get extended pointer on dentry name name_xp = XPTR( dentry_cxy , &dentry_ptr->name ); } // get pointers on TXT0 chdev xptr_t txt0_xp = chdev_dir.txt_tx[0]; cxy_t txt0_cxy = GET_CXY( txt0_xp ); chdev_t * txt0_ptr = GET_PTR( txt0_xp ); // get extended pointer on remote TXT0 chdev lock xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); // get TXT0 lock in busy waiting mode remote_spinlock_lock_busy( lock_xp , &save_sr ); // print header nolock_printk("\n***** file system state\n\n"); // call recursive function vfs_recursive_display( inode_xp , name_xp , 0 ); // release lock remote_spinlock_unlock_busy( lock_xp , save_sr ); } // end vfs_display() ////////////////////////////////////////////////////////////////////////////////////////// // This function is used by the vfs_lookup() function. // It takes an extended pointer on a remote inode (parent directory inode), // and check access_rights violation for the calling thread. // It can be used by any thread running in any cluster. ////////////////////////////////////////////////////////////////////////////////////////// // @ inode_xp : extended pointer on inode. // @ client_uid : client thread user ID // @ client_gid : client thread group ID // @ return true if access rights are violated. 
////////////////////////////////////////////////////////////////////////////////////////// bool_t vfs_access_denied( xptr_t inode_xp, uint32_t client_uid, uint32_t client_gid ) { // get found inode cluster and local pointer cxy_t inode_cxy = GET_CXY( inode_xp ); vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); // get inode access mode, UID, and GID // TODO uint32_t mode = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->mode ) ); uid_t uid = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->uid ) ); gid_t gid = hal_remote_lw( XPTR( inode_cxy , &inode_ptr->gid ) ); // FIXME : me must use mode if( (uid == client_uid) || (gid == client_gid) ) return false; else return true; } ////////////////////////////////////////////////////////////////////////////////////////// // This static function is used by the vfs_lookup() function. // It takes an extended pointer on a remote parent directory inode, a directory // entry name, and returns an extended pointer on the child inode. // It can be used by any thread running in any cluster. ////////////////////////////////////////////////////////////////////////////////////////// // @ parent_xp : extended pointer on parent inode in remote cluster. // @ name : dentry name // @ child_xp : [out] buffer for extended pointer on child inode. // @ return true if success / return false if not found. 
////////////////////////////////////////////////////////////////////////////////////////// static bool_t vfs_get_child( xptr_t parent_xp, char * name, xptr_t * child_xp ) { xptr_t xhtab_xp; // extended pointer on hash table containing children dentries xptr_t dentry_xp; // extended pointer on children dentry // get parent inode cluster and local pointer cxy_t parent_cxy = GET_CXY( parent_xp ); vfs_inode_t * parent_ptr = GET_PTR( parent_xp ); // get extended pointer on hash table of children directory entries xhtab_xp = XPTR( parent_cxy , &parent_ptr->children ); // search extended pointer on matching dentry dentry_xp = xhtab_lookup( xhtab_xp , name ); if( dentry_xp == XPTR_NULL ) return false; // get dentry cluster and local pointer cxy_t dentry_cxy = GET_CXY( dentry_xp ); vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); // return child inode *child_xp = (xptr_t)hal_remote_lwd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) ); return true; } // end vfs_get_child() ////////////////////////////////////////////////////////////////////////////////////////// // This static function is used by the vfs_lookup() function. // It takes the pointer on a buffer containing a complete pathname, and return // in the buffer, allocated by the caller, a single name in the path. // It return also in the pointer the next character to analyse in the path. // Finally it returns a boolean, that is true when the returned is the // last name in the path. The names are supposed to be separated by one or several '/' // characters, that are not written in the buffer. ////////////////////////////////////////////////////////////////////////////////////////// // @ current : pointer on first character to analyse in buffer containing the path. // @ name : [out] pointer on buffer allocated by the caller for the returned name. // @ next : [out] pointer on next character to analyse in buffer containing the path. // @ last : [out] true if the returned name is the last (NUL character found). 
// @ return 0 if success / return EINVAL if string empty (first chracter is NUL). ////////////////////////////////////////////////////////////////////////////////////////// static error_t vfs_get_name_from_path( char * current, char * name, char ** next, bool_t * last ) { char * ptr = current; // skip leading '/' characters while( *ptr == '/' ) ptr++; // return EINVAL if string empty if( *ptr == 0 ) return EINVAL; // copy all characters in name until NUL or '/' while( (*ptr != 0) && (*ptr !='/') ) *(name++) = *(ptr++); // set NUL terminating character in name buffer *(name++) = 0; // return last an next if( *ptr == 0 ) // last found character is NUL => last name in path { *last = true; } else // last found character is '/' => skip it { *last = false; *next = ptr + 1; } return 0; } // end vfs_get name_from_path() ////////////////////////////////////////////// error_t vfs_lookup( xptr_t cwd_xp, char * pathname, uint32_t mode, xptr_t * inode_xp ) { char name[CONFIG_VFS_MAX_NAME_LENGTH]; // one name in path xptr_t parent_xp; // extended pointer on parent inode cxy_t parent_cxy; // cluster for parent inode vfs_inode_t * parent_ptr; // local pointer on parent inode xptr_t child_xp; // extended pointer on child inode cxy_t child_cxy; // cluster for child inode vfs_inode_t * child_ptr; // local pointer on child inode vfs_fs_type_t fs_type; // File system type vfs_ctx_t * ctx_ptr; // local pointer on FS context char * current; // current pointer on path char * next; // next value for current pointer bool_t last; // true when the name is the last in path bool_t found; // true when a child has been found bool_t dir; // searched inode is a directory bool_t create; // searched inode must be created if not found bool_t excl; // searched inode must not exist thread_t * this; // pointer on calling thread descriptor process_t * process; // pointer on calling process descriptor error_t error; this = CURRENT_THREAD; process = this->process; #if DEBUG_VFS_LOOKUP uint32_t cycle = 
(uint32_t)hal_get_cycles();
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : thread %x in process %x enter for <%s> / cycle %d\n",
__FUNCTION__, this->trdid, process->pid, pathname, cycle );
#endif

    // compute lookup flags
    dir    = mode & VFS_LOOKUP_DIR;
    create = mode & VFS_LOOKUP_CREATE;
    excl   = mode & VFS_LOOKUP_EXCL;

    // get extended pointer on first inode to search :
    // absolute path starts from the process VFS root, relative path from <cwd_xp>
    if( pathname[0] == '/' ) parent_xp = process->vfs_root_xp;
    else                     parent_xp = cwd_xp;

    // initialise other loop variables
    current  = pathname;
    next     = NULL;
    last     = false;
    child_xp = XPTR_NULL;

    // take lock on parent inode
    vfs_inode_lock( parent_xp );

    // sequencially loop on nodes in pathname
    // load from device if one node in path not found in inode tree
    // exit loop when last name found (i.e. last == true)
    do
    {
        // get one name from path, and "last" flag
        // NOTE(review) : the error_t return value is ignored here ; if the remaining
        // path is empty (e.g. pathname == "/"), <name> stays uninitialised — confirm
        // that callers never pass such a path, or check the returned code.
        vfs_get_name_from_path( current , name , &next , &last );

#if (DEBUG_VFS_LOOKUP & 1)
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : look for <%s> / last = %d\n", __FUNCTION__ , name , last );
#endif

        // search a child dentry matching name in parent inode
        found = vfs_get_child( parent_xp,
                               name,
                               &child_xp );

        if (found == false )                       // child not found in inode tree
        {

#if (DEBUG_VFS_LOOKUP & 1)
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : miss <%s> node => try to create it\n", __FUNCTION__ , name );
#endif
            // if a child node is not found in the inode tree,
            // we introduce a new (dentry/inode) in inode tree,
            // and try to find it by scanning the parent directory mapper.
            // . if it is found in parent mapper:
            //   - if the child is a directory, the child mapper is loaded from device
            //   - if the child is not a directory, the search is completed
            // .
            // if it is not found in the parent mapper:
            //   - if ( not last or not create ) an error is reported
            //   - if (last and create and dir) a new directory is created
            //   - if (last and create and not dir) a new file is created

            // release lock on parent inode
            vfs_inode_unlock( parent_xp );

            // get parent inode FS type
            parent_cxy = GET_CXY( parent_xp );
            parent_ptr = GET_PTR( parent_xp );
            ctx_ptr    = (vfs_ctx_t *)hal_remote_lpt( XPTR( parent_cxy,&parent_ptr->ctx ) );
            fs_type    = hal_remote_lw( XPTR( parent_cxy , &ctx_ptr->type ) );

            // select a cluster for missing inode
            child_cxy = vfs_cluster_random_select();

            // insert a new child dentry/inode in inode tree
            error = vfs_add_child_in_parent( child_cxy,
                                             0,        // type will be updated later
                                             fs_type,
                                             parent_xp,
                                             name,
                                             NULL,     // fs_type_specific inode extend
                                             &child_xp );
            if( error )
            {
                printk("\n[ERROR] in %s : cannot create node %s for path <%s>\n",
                __FUNCTION__ , name, pathname );
                return ENOMEM;
            }

            // get child inode cluster and local pointer
            child_cxy = GET_CXY( child_xp );
            child_ptr = GET_PTR( child_xp );

#if (DEBUG_VFS_LOOKUP & 1)
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : missing <%s> inode speculatively created / cxy %x / ptr %x\n",
__FUNCTION__ , name , child_cxy, child_ptr );
#endif

            // scan parent mapper to complete the missing inode
            if( parent_cxy == local_cxy )
            {
                error = vfs_inode_load( parent_ptr,
                                        name,
                                        child_xp );
            }
            else
            {
                rpc_vfs_inode_load_client( parent_cxy,
                                           parent_ptr,
                                           name,
                                           child_xp,
                                           &error );
            }

            if ( error )   // child not found in parent mapper
            {
                if( last && create && dir )   // new directory => update inode type
                {
                    hal_remote_sw( XPTR( child_cxy, &child_ptr->type ), INODE_TYPE_DIR );

#if (DEBUG_VFS_LOOKUP & 1)
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : created node <%s> in path %s / type DIR\n",
__FUNCTION__ , name, pathname );
#endif
                }
                else if ( last && create )    // new file => update inode type
                {
                    hal_remote_sw( XPTR( child_cxy, &child_ptr->type ), INODE_TYPE_FILE );

#if (DEBUG_VFS_LOOKUP & 1)
if(
DEBUG_VFS_LOOKUP < cycle ) printk("\n[DBG] %s : created node <%s> in path %s / type FILE\n", __FUNCTION__ , name, pathname ); #endif } else // not last or not create => remove created node { printk("\n[ERROR] in %s : <%s> node not found in parent for <%s>\n", __FUNCTION__ , name , pathname ); vfs_remove_child_from_parent( child_xp ); return ENOENT; } } else // child found in parent { // load child mapper from device if child is a directory (prefetch) if( hal_remote_lw( XPTR( child_cxy , &child_ptr->type ) ) == INODE_TYPE_DIR ) { if( child_cxy == local_cxy ) { error = vfs_mapper_load_all( child_ptr ); } else { rpc_vfs_mapper_load_all_client( child_cxy, child_ptr, &error ); } if ( error ) { printk("\n[ERROR] in %s : cannot load <%s> from device\n", __FUNCTION__ , name ); vfs_remove_child_from_parent( child_xp ); return EIO; } #if (DEBUG_VFS_LOOKUP & 1) if( DEBUG_VFS_LOOKUP < cycle ) printk("\n[DBG] %s : load mapper from device for node <%s> in path %s\n", __FUNCTION__ , name, pathname ); #endif } } // take lock on parent inode vfs_inode_lock( parent_xp ); } else // child found in inode tree { #if (DEBUG_VFS_LOOKUP & 1) if( DEBUG_VFS_LOOKUP < cycle ) printk("\n[DBG] %s : found <%s> / inode %x in cluster %x\n", __FUNCTION__ , name , GET_PTR(child_xp) , GET_CXY(child_xp) ); #endif child_ptr = GET_PTR( child_xp ); child_cxy = GET_CXY( child_xp ); parent_cxy = GET_CXY( parent_xp ); parent_ptr = GET_PTR( parent_xp ); if( last && (mode & VFS_LOOKUP_CREATE) && (mode & VFS_LOOKUP_EXCL) ) { printk("\n[ERROR] in %s : node already exist <%s>\n", __FUNCTION__, name ); return EINVAL; } } // TODO check access rights here [AG] // error = vfs_access_denied( child_xp, // client_uid, // client_gid ); // if( error ) // { // printk("\n[ERROR] in %s : thread %x / permission denied for %s\n", // __FUNCTION__ , this , name ); // return EACCES; // } // take lock on child inode and release lock on parent vfs_inode_lock( child_xp ); vfs_inode_unlock( parent_xp ); // update loop variables 
        parent_xp = child_xp;
        current   = next;
    }
    while( last == false );

    // release lock
    vfs_inode_unlock( parent_xp );

#if DEBUG_VFS_LOOKUP
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_LOOKUP < cycle )
printk("\n[DBG] %s : thread %x in process %x exit for <%s>\n"
" parent %x in cluster %x / child %x in cluster %x / cycle %d\n",
__FUNCTION__ , this->trdid, process->pid, pathname,
parent_ptr, parent_cxy, child_ptr, child_cxy, cycle );
#endif

    // return searched pointer :
    // with the PARENT flag the caller wants the parent directory inode
    if( mode & VFS_LOOKUP_PARENT ) *inode_xp = parent_xp;
    else                           *inode_xp = child_xp;

    return 0;
}  // end vfs_lookup()

////////////////////////////////////////////
error_t vfs_get_path( xptr_t   searched_xp,
                      char   * buffer,
                      uint32_t max_size )
{
    xptr_t    dentry_xp;   // extended pointer on current dentry
    char    * name;        // local pointer on current dentry name
    uint32_t  length;      // length of current dentry name
    uint32_t  count;       // number of characters written in buffer
    uint32_t  index;       // slot index in buffer
    xptr_t    inode_xp;    // extended pointer on current inode

    // implementation note:
    // we use two variables "index" and "count" because the buffer
    // is written in decreasing index order (from leaf to root)

    // TODO : handle conflict with a concurrent rename [AG]
    // FIXME : handle synchro in the loop [AG]

    // set the NUL character in buffer / initialise buffer index and count
    buffer[max_size - 1] = 0;
    count = 1;
    index = max_size - 2;

    // initialize current inode
    inode_xp = searched_xp;

    // exit when root inode found (i.e.
dentry_xp == XPTR_NULL) do { // get inode cluster and local pointer cxy_t inode_cxy = GET_CXY( inode_xp ); vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); // get extended pointer on parent dentry dentry_xp = (xptr_t)hal_remote_lwd( XPTR( inode_cxy , inode_ptr->parent_xp ) ); // get dentry cluster and local pointer cxy_t dentry_cxy = GET_CXY( dentry_xp ); vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); // get dentry name length and pointer length = hal_remote_lw( XPTR( dentry_cxy , &dentry_ptr->length ) ); name = (char *)hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->name ) ); // update index and count index -= (length + 1); count += (length + 1); // check buffer overflow if( count >= max_size ) { printk("\n[ERROR] in %s : kernel buffer too small\n", __FUNCTION__ ); return EINVAL; } // update pathname hal_remote_memcpy( XPTR( local_cxy , &buffer[index + 1] ) , XPTR( dentry_cxy , name ) , length ); buffer[index] = '/'; // get extended pointer on next inode inode_xp = (xptr_t)hal_remote_lwd( XPTR( dentry_cxy , dentry_ptr->parent ) ); } while( (dentry_xp != XPTR_NULL) ); return 0; } // end vfs_get_path() ////////////////////////////////////////////////////////////// error_t vfs_add_child_in_parent( cxy_t child_cxy, vfs_inode_type_t inode_type, vfs_fs_type_t fs_type, xptr_t parent_xp, char * name, void * extend, xptr_t * child_xp ) { error_t error; xptr_t dentry_xp; // extended pointer on created dentry xptr_t inode_xp; // extended pointer on created inode cxy_t parent_cxy; // parent inode cluster identifier vfs_inode_t * parent_ptr; // parent inode local pointer // get parent inode cluster and local pointer parent_cxy = GET_CXY( parent_xp ); parent_ptr = GET_PTR( parent_xp ); #if DEBUG_VFS_ADD_CHILD uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_ADD_CHILD < cycle ) printk("\n[DBG] %s : thread %x enter for <%s> / child_cxy = %x / parent_cxy = %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, name, child_cxy, parent_cxy, 
(uint32_t)hal_get_cycles() ); #endif // 1. create dentry if( parent_cxy == local_cxy ) // parent cluster is the local cluster { error = vfs_dentry_create( fs_type, name, parent_ptr, &dentry_xp ); #if (DEBUG_VFS_ADD_CHILD & 1) if( (DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) ) printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, local_cxy ); #endif } else // parent cluster is remote { rpc_vfs_dentry_create_client( parent_cxy, fs_type, name, parent_ptr, &dentry_xp, &error ); #if (DEBUG_VFS_ADD_CHILD & 1) if( (DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) ) printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, parent_cxy ); #endif } if( error ) { printk("\n[ERROR] in %s : cannot create dentry <%s> in cluster %x\n", __FUNCTION__ , name , parent_cxy ); return ENOMEM; } // 2. create child inode TODO : define attr / mode / uid / gid uint32_t attr = 0; uint32_t mode = 0; uint32_t uid = 0; uint32_t gid = 0; if( child_cxy == local_cxy ) // child cluster is the local cluster { error = vfs_inode_create( dentry_xp, fs_type, inode_type, extend, attr, mode, uid, gid, &inode_xp ); #if (DEBUG_VFS_ADD_CHILD & 1) if( DEBUG_VFS_ADD_CHILD < cycle ) printk("\n[DBG] %s : inode <%x> created in cluster %x\n", __FUNCTION__ , GET_PTR(inode_xp) , local_cxy ); #endif } else // child cluster is remote { rpc_vfs_inode_create_client( child_cxy, dentry_xp, fs_type, inode_type, extend, attr, mode, uid, gid, &inode_xp, &error ); #if (DEBUG_VFS_ADD_CHILD & 1) if( DEBUG_VFS_ADD_CHILD < cycle ) printk("\n[DBG] %s : inode <%s> created in cluster %x\n", __FUNCTION__ , GET_PTR(inode_xp) , child_cxy ); #endif } if( error ) { printk("\n[ERROR] in %s : cannot create inode in cluster %x\n", __FUNCTION__ , child_cxy ); vfs_dentry_t * dentry = GET_PTR( dentry_xp ); if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry ); else rpc_vfs_dentry_destroy_client( parent_cxy , dentry , &error ); return ENOMEM; } // 3. 
    // update extended pointer on inode in dentry
    // (step 3 : connect the dentry to the newly created inode)
    cxy_t          dentry_cxy = GET_CXY( dentry_xp );
    vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
    hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );

#if DEBUG_VFS_ADD_CHILD
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VFS_ADD_CHILD < cycle )
printk("\n[DBG] %s : thread %x in process %x exit for <%s>\n",
__FUNCTION__, CURRENT_THREAD, CURRENT_THREAD->process->pid, name );
#endif

    // success : return extended pointer on child inode
    *child_xp = inode_xp;
    return 0;

    // FIXME update the refcount fields in both inode and dentry

}  // end vfs_add_child_in_parent()

///////////////////////////////////////////////////////
error_t vfs_remove_child_from_parent( xptr_t inode_xp )
{
    cxy_t          inode_cxy;
    vfs_inode_t  * inode_ptr;
    xptr_t         dentry_xp;
    cxy_t          dentry_cxy;
    vfs_dentry_t * dentry_ptr;
    error_t        error;

    // get inode cluster and local pointer
    inode_cxy = GET_CXY( inode_xp );
    inode_ptr = GET_PTR( inode_xp );

    // get cluster and pointers of associated dentry
    dentry_xp  = hal_remote_lwd( XPTR( inode_cxy , &inode_ptr->parent_xp ) );
    dentry_cxy = GET_CXY( dentry_xp );
    dentry_ptr = GET_PTR( dentry_xp );

    // FIXME update the refcount fields in both inode and dentry

    // delete dentry
    if( dentry_cxy == local_cxy )
    {
        error = vfs_dentry_destroy( dentry_ptr );
    }
    else
    {
        rpc_vfs_dentry_destroy_client( dentry_cxy,
                                       dentry_ptr,
                                       &error );
    }
    if( error ) return EINVAL;

    // delete inode
    // NOTE(review) : in the local branch <error> is not written by
    // vfs_inode_destroy(), so the test below re-reads the (successful) value
    // left by the dentry destruction — harmless, but worth confirming that
    // vfs_inode_destroy() cannot fail.
    if( inode_cxy == local_cxy )
    {
        vfs_inode_destroy( inode_ptr );
    }
    else
    {
        rpc_vfs_inode_destroy_client( inode_cxy,
                                      inode_ptr,
                                      &error );
    }
    if( error ) return EINVAL;

    return 0;
}  // end vfs_remove_child_from_parent()

//////////////////////////////////////////////////////////////////////////////////////////
//            Mapper related functions
//////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////
error_t vfs_mapper_move_page( page_t * page,
                              bool_t   to_mapper )
{
    error_t error = 0;

    assert( (page != NULL) ,
"page pointer is NULL\n" ); mapper_t * mapper = page->mapper; assert( (mapper != NULL) , "no mapper for page\n" ); #if DEBUG_VFS_MAPPER_MOVE uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_MAPPER_MOVE < cycle ) printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / inode %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle ); #endif // get FS type vfs_fs_type_t fs_type = mapper->type; // call relevant FS function if( fs_type == FS_TYPE_FATFS ) { rwlock_wr_lock( &mapper->lock ); error = fatfs_mapper_move_page( page , to_mapper ); rwlock_wr_unlock( &mapper->lock ); } else if( fs_type == FS_TYPE_RAMFS ) { assert( false , "should not be called for RAMFS\n" ); } else if( fs_type == FS_TYPE_DEVFS ) { assert( false , "should not be called for DEVFS\n" ); } else { assert( false , "undefined file system type\n" ); } #if DEBUG_VFS_MAPPER_MOVE cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_MAPPER_MOVE < cycle ) printk("\n[DBG] %s : thread %x exit for page %d / mapper %x / inode %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle ); #endif return error; } // end vfs_move_page() ////////////////////////////////////////////////// error_t vfs_mapper_load_all( vfs_inode_t * inode ) { assert( (inode != NULL) , "inode pointer is NULL\n" ); uint32_t index; page_t * page; mapper_t * mapper = inode->mapper; uint32_t size = inode->size; assert( (mapper != NULL) , "mapper pointer is NULL\n" ); #if DEBUG_VFS_MAPPER_LOAD uint32_t cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_MAPPER_MOVE < cycle ) printk("\n[DBG] %s : thread %x enter for inode %x in cluster %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle ); #endif // compute number of pages uint32_t npages = size >> CONFIG_PPM_PAGE_SHIFT; if( (size & CONFIG_PPM_PAGE_MASK) || (size == 0) ) npages++; // loop on pages for( index = 0 ; index < npages ; index ++ ) { // this function allocates the missing page in mapper, // 
and call the vfs_mapper_move_page() to load the page from device page = mapper_get_page( mapper , index ); if( page == NULL ) return EIO; } #if DEBUG_VFS_MAPPER_LOAD cycle = (uint32_t)hal_get_cycles(); if( DEBUG_VFS_MAPPER_MOVE < cycle ) printk("\n[DBG] %s : thread %x exit for inode %x in cluster %x / cycle %d\n", __FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle ); #endif return 0; } // end vfs_mapper_load_all()