/*
 * core.c - core descriptor access function.
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

// NOTE(review): the header file names were lost when this file was extracted ;
// each directive below must name the relevant kernel header
// (presumably kernel_config.h, hal_kernel_types.h, core.h, scheduler.h, ...)
// TODO restore the original file names before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/////////////////////////////////
// Initialises the core descriptor <core>, identified by its local index
// <lid> and its hardware global index <gid> : resets the cycles counters
// and usage statistics, seeds the per-core pseudo-random generator,
// initialises the embedded scheduler, and initialises the lock and the
// root of the local alarms list.
/////////////////////////////////
void core_init( core_t * core,
                lid_t lid,
                gid_t gid )
{
    core->lid        = lid;
    core->gid        = gid;
    core->cycles     = 0;
    core->time_stamp = 0;
    core->ticks_nr   = 0;
    core->usage      = 0;
    core->fpu_owner  = NULL;

    // seed the per-core pseudo-random generator from the time stamp
    core->rand_last  = hal_time_stamp() & 0xFFF;

    // initialize the scheduler
    sched_init( core );

    // initialise the alarms lock
    remote_busylock_init( XPTR( local_cxy , &core->alarms_lock ) , LOCK_CORE_ALARMS );

    // initialise the alarms list
    list_root_init( &core->alarms_root );
}

////////////////////////////////////////////////////////////////////////////////////
// This static function checks the alarms registered in the core, and calls the
// relevant alarm handler for all alarms whose time is elapsed.
// It does not take the lock protecting the alarms list, because it accesses only
// the first alarm in the list, and all modifications in the list are done by
// the low level access functions called by the handler(s).
//////////////////////////////////////////////////////////////////////////////////// static void core_check_alarms( core_t * core ) { alarm_handler_t * handler; // get pointer on root of alarms list list_entry_t * root = &core->alarms_root; // does nothing if list is empty if( list_is_empty( root ) ) return; while( 1 ) { // get pointer on first alarm alarm_t * alarm = LIST_FIRST( root , alarm_t , list ); // get first alarm date cycle_t alarm_date = alarm->date; // get current date cycle_t current_date = hal_get_cycles(); // call handler if delay elapsed, and retry if( current_date >= alarm_date ) { // get pointer on registered alarm handler handler = (alarm_handler_t *)alarm->func_ptr; // call alarm handler handler( alarm->args_xp ); } else // exit loop when first alarm delay not elapsed { break; } } } // end core_check_alarms() ////////////////////// lid_t core_lid( void ) { uint32_t i; // get pointer on local cluser descriptor cluster_t * cluster = LOCAL_CLUSTER; // get core gid from hardware register gid_t gid = hal_get_gid(); // makes an associative search in core_tbl[] from gid for( i = 0 ; i < cluster->cores_nr ; i++ ) { if( gid == cluster->core_tbl[i].gid ) return i; } assert( __FUNCTION__, false , "core not found" ); return 0; } ////////////////////////////////////////////// inline uint32_t core_get_rand( core_t * core ) { uint32_t value = ((core->rand_last * CONFIG_RDNG_PARAM_A) + CONFIG_RDNG_PARAM_C) ^ (hal_get_cycles() & 0xFFF); core->rand_last = value; return value; } //////////////////////////////////// void core_get_time( core_t * core, uint32_t * tm_s, uint32_t * tm_us ) { // get number of cycles uint64_t cycles = core->cycles; // get number of cycles per second uint32_t cycles_per_second = LOCAL_CLUSTER->sys_clk; *tm_s = cycles / cycles_per_second; *tm_us = (cycles * 1000000) % cycles_per_second; } //////////////////////////////// void core_clock( core_t * core ) { uint32_t ticks; // update ticks counter ticks = core->ticks_nr++; // handle alarms 
core_check_alarms( core ); // handle scheduler if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK"); } //////////////////////////////////////// void core_compute_stats( core_t * core ) { thread_t * idle = core->scheduler.idle; uint32_t ticks = core->ticks_nr; uint32_t idle_percent; uint32_t busy_percent; uint32_t usage; // compute cumulated usage ticks = (ticks) ? ticks : 1; idle_percent = (idle->ticks_nr * 100) / ticks; idle_percent = (idle_percent > 100) ? 100 : idle_percent; busy_percent = 100 - idle_percent; usage = (busy_percent + core->usage) / 2; // update core descriptor core->usage = usage; hal_fence(); core->ticks_nr = 0; idle->ticks_nr = 0; } ///////////////////////////////////// void core_reset_stats( core_t * core ) { thread_t * idle = core->scheduler.idle; core->ticks_nr = 0; core->usage = 0; idle->ticks_nr = 0; hal_fence(); }