/*
 * device.c - device descriptor operations implementation
 *
 * Authors  Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// NOTE(review): the header names of the following #include directives were
// lost (extraction damage) — restore them from the original source tree.
#include #include #include #include #include #include #include #include #include

////////////////////////////////////////////////////////////////////////////////////
// This array defines printable strings for devices functional types.
//////////////////////////////////////////////////////////////////////////////////// char * device_func_str[DEV_FUNC_NR] = { "RAM", "ROM", "TXT", "FBF", "IOB", "IOC", "MMC", "MWR", "NIC", "CMA", "XCU", "PIC" }; //////////////////////////////////////// xptr_t device_alloc( boot_info_t * info, bool_t is_local ) { xptr_t dev_xp; device_t * dev_ptr; uint32_t index; uint32_t x; uint32_t y; cxy_t target_cxy; kmem_req_t req; error_t error; if( is_local ) // forced local allocation { req.type = KMEM_DEVICE; req.flags = AF_ZERO; dev_ptr = (device_t *)kmem_alloc( &req ); if( dev_ptr == NULL ) return XPTR_NULL; else return ( XPTR( local_cxy , dev_ptr ) ); } else // likely remote allocation { // select (pseudo-randomly) a target cluster index = ( hal_time_stamp() + hal_get_gid() ) % (info->x_size * info->y_size); x = index / info->y_size; y = index % info->y_size; target_cxy = (x<y_width) + y; if( target_cxy == local_cxy ) // local allocation { req.type = KMEM_DEVICE; req.flags = AF_ZERO; dev_ptr = (device_t *)kmem_alloc( &req ); if( dev_ptr == NULL ) return XPTR_NULL; else return ( XPTR( local_cxy , dev_ptr ) ); } else // remote allocation { rpc_device_alloc_client( target_cxy , &dev_xp , &error ); if( error ) return XPTR_NULL; else return ( dev_xp ); } } } // end device_alloc() ///////////////////////////////////// void device_init( xptr_t device, uint32_t func, uint32_t impl, uint32_t channel, uint32_t is_rx, xptr_t base, uint32_t size ) { // get device cluster and local pointer cxy_t cxy = GET_CXY( device ); device_t * ptr = (device_t *)GET_PTR( device ); // initialize waiting threads queue and associated lock remote_spinlock_init( XPTR( cxy , &ptr->wait_lock ) ); xlist_root_init( XPTR( cxy , &ptr->wait_root ) ); // initialize basic attributes hal_remote_sw ( XPTR( cxy , &ptr->func ) , (uint32_t)func ); hal_remote_sw ( XPTR( cxy , &ptr->impl ) , (uint32_t)impl ); hal_remote_sw ( XPTR( cxy , &ptr->channel ) , (uint32_t)channel ); hal_remote_sw ( XPTR( cxy , &ptr->is_rx ) , 
(uint32_t)is_rx ); hal_remote_swd( XPTR( cxy , &ptr->base ) , (uint64_t)base ); hal_remote_sw ( XPTR( cxy , &ptr->size ) , (uint64_t)size ); } // end device_init() //////////////////////////////////////////////// void device_register_command( xptr_t xp_dev, thread_t * thread ) { thread_t * thread_ptr = CURRENT_THREAD; // get device descriptor cluster and local pointer cxy_t dev_cxy = GET_CXY( xp_dev ); device_t * dev_ptr = (device_t *)GET_PTR( xp_dev ); // build extended pointers on client thread xlist and device root xptr_t xp_list = XPTR( local_cxy , &thread_ptr->wait_list ); xptr_t xp_root = XPTR( dev_cxy , &dev_ptr->wait_root ); // get lock protecting queue remote_spinlock_lock( XPTR( dev_cxy , &dev_ptr->wait_lock ) ); // register client thread in waiting queue xlist_add_last( xp_root , xp_list ); // unblock server thread thread_unblock( XPTR( dev_cxy , &dev_ptr->server ) , THREAD_BLOCKED_DEV_QUEUE ); // release lock remote_spinlock_unlock( XPTR( dev_cxy , &dev_ptr->wait_lock ) ); // client thread goes to blocked state and deschedule thread_block( thread_ptr , THREAD_BLOCKED_IO ); sched_yield(); } // end device register_command()