Changeset 450 for trunk/kernel


Timestamp: Jun 29, 2018, 10:44:14 AM (6 years ago)
Author: alain
Message:

Fix a bug in function sched_handle_signals():
when the deleted user thread is the last executed thread,
the sched->u_last field must be updated to point to another user thread.
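
For readers who just want the shape of the fix, the sketch below paraphrases the new code added to sched_handle_signals() in kern/scheduler.c (full hunk further down). The field names (u_last, u_root, u_threads_nr, sched_list) are taken from that file; for clarity the repair is shown here before the unlink, whereas the actual patch unlinks first.

    // When the thread being deleted is the one referenced by sched->u_last,
    // make u_last point to another list entry (or NULL when it was the only
    // user thread), so that the next scheduling scan starts from a valid entry.
    if( sched->u_last == &thread->sched_list )
    {
        if( threads_nr == 1 )                                 // only user thread
            sched->u_last = NULL;
        else if( sched->u_root.next == &thread->sched_list )  // first entry in list
            sched->u_last = sched->u_root.pred;
        else                                                  // any other position
            sched->u_last = sched->u_root.next;
    }
    list_unlink( &thread->sched_list );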

Location: trunk/kernel
Files: 13 edited

Legend: unchanged lines are shown unmarked; removed lines are prefixed with "-", added lines with "+".
  • trunk/kernel/kern/chdev.c

    r447 → r450

      uint32_t rx_cycle = (uint32_t)hal_get_cycles();
      if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    - printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
    - __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
    + printk("\n[DBG] %s : client_thread %x (%s) enter for RX / server = %x / cycle %d\n",
    + __FUNCTION__, this, thread_type_str(this->type) , server_ptr, rx_cycle );
      #endif
    …
      uint32_t tx_cycle = (uint32_t)hal_get_cycles();
      if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    - printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
    - __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
    + printk("\n[DBG] %s : client_thread %x (%s) enter for TX / server = %x / cycle %d\n",
    + __FUNCTION__, this, thread_type_str(this->type) , server_ptr, tx_cycle );
      #endif
    …
          lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

          // critical section for the following sequence:
          // (1) take the lock protecting waiting queue
          // (2) block the client thread
    …
          thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

    + #if (DEBUG_CHDEV_CMD_TX & 1)
    + if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + printk("\n[DBG] in %s : client thread %x blocked\n", __FUNCTION__, this );
    + #endif
    +
          // unblock server thread if required
          if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
          thread_unblock( server_xp , THREAD_BLOCKED_IDLE );

    + #if (DEBUG_CHDEV_CMD_TX & 1)
    + if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + {
    + printk("\n[DBG] in %s : server thread %x unblocked\n", __FUNCTION__, server_ptr );
    + chdev_queue_display( chdev_xp );
    + }
    + #endif
    +
          // register client thread in waiting queue
          xlist_add_last( root_xp , list_xp );

    + #if (DEBUG_CHDEV_CMD_TX & 1)
    + if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + {
    + printk("\n[DBG] in %s : thread %x registered write request in chdev\n", __FUNCTION__, this );
    + chdev_queue_display( chdev_xp );
    + }
    + #endif
    +
          // send IPI to core running the server thread when server != client
          different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    -     if( different ) dev_pic_send_ipi( chdev_cxy , lid );
    +     if( different )
    +     {
    +         dev_pic_send_ipi( chdev_cxy , lid );

    + #if (DEBUG_CHDEV_CMD_TX & 1)
    + if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    + printk("\n[DBG] in %s : client thread %x sent IPI to server thread %x\n",
    + __FUNCTION__, this, server_ptr );
    + #endif
    +
    +     }
    +
          // release lock
          remote_spinlock_unlock( lock_xp );
    …
          // get extended pointer on root of requests queue
    -     root_xp = hal_remote_lwd( XPTR( chdev_cxy , &chdev_ptr->wait_root ) );
    +     root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );

          // get chdev name
    …
                              pid        = hal_remote_lw ( XPTR( thread_cxy , &process->pid        ) );

    -             printk("- trdid %X / pid %X\n", trdid, pid );
    +             printk("- thread %X / cluster %X / trdid %X / pid %X\n",
    +             thread_ptr, thread_cxy, trdid, pid );
              }
          }
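
The new traces in this file follow the two-level convention visible throughout the changeset: a trace is printed only once the current cycle count exceeds the configured threshold value, and the most verbose traces are additionally guarded by a test on bit 0 of the flag (the (DEBUG_CHDEV_CMD_TX & 1) checks above), so they are compiled in only when the value set in kernel_config.h is odd. A minimal sketch of the pattern, using a hypothetical DEBUG_FOO flag as a stand-in for flags such as DEBUG_CHDEV_CMD_TX:

    #define DEBUG_FOO  3      // hypothetical flag: non-zero enables traces, odd value enables details

    void foo( void )
    {
        uint32_t cycle = (uint32_t)hal_get_cycles();

    #if DEBUG_FOO                       // basic trace, printed once cycle > DEBUG_FOO
    if( DEBUG_FOO < cycle )
    printk("\n[DBG] %s : enter / cycle %d\n", __FUNCTION__, cycle );
    #endif

    #if (DEBUG_FOO & 1)                 // extra detail, compiled in only when bit 0 is set
    if( DEBUG_FOO < cycle )
    printk("\n[DBG] %s : detailed state\n", __FUNCTION__ );
    #endif
    }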
  • trunk/kernel/kern/chdev.h

    r447 → r450

       * This structure is replicated in each cluster, and is initialised during kernel init.
       * It is used for fast access to a device descriptor, from type and channel for an
    -  * external peripheral, or from type and cluster for a hared internal peripheral.
    +  * external peripheral, or from type and cluster for an internal peripheral.
       * - a "shared" chdev can be accessed by any thread running in any cluster.
       * - a "private" chdev can only be accessed by a thread running in local cluster.
  • trunk/kernel/kern/process.c

    r446 → r450

          cluster_process_copies_unlink( process );

    -     // remove process from children_list if process owner cluster
    +     // remove process from children_list
    +     // and release PID if owner cluster
          if( CXY_FROM_PID( pid ) == local_cxy )
          {
    …
                  hal_remote_atomic_add( children_nr_xp , -1 );
              remote_spinlock_unlock( children_lock_xp );
    -     }
    -
    -     // release the process PID to cluster manager if process owner cluster
    -     if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid );
    +
    +     // release the process PID to cluster manager
    +     cluster_pid_release( pid );
    +
    +     }

          // FIXME close all open files and update dirty [AG]
    …
          // remove thread from th_tbl[]
          process->th_tbl[ltid] = NULL;
    -     process->th_nr--;
    +     process->th_nr = count-1;

          // release lock protecting th_tbl
    …
          return (count == 1);

    - }  // process_remove_thread()
    + }  // end process_remove_thread()

      /////////////////////////////////////////////////////////
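
The th_nr change above is easier to read with the surrounding lines in mind: count is the number of registered threads sampled under the lock protecting th_tbl[], so writing count - 1 back and returning (count == 1) tells the caller whether it just removed the last thread of the process. A minimal sketch of the sequence, with hypothetical lock-handling names since those lines are outside the hunk:

    // Sketch of the removal sequence in process_remove_thread() (r450).
    spinlock_lock( &process->th_lock );      // hypothetical name for the th_tbl lock
    count = process->th_nr;                  // threads registered before removal
    process->th_tbl[ltid] = NULL;            // free the thread slot
    process->th_nr = count - 1;              // derive the new count from the sampled value
    spinlock_unlock( &process->th_lock );
    return (count == 1);                     // true => the removed thread was the last one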
  • trunk/kernel/kern/process.h

    r446 → r450

      /*********************************************************************************************
    -  * This function attach a process descriptor in owner cluster, identified by the <process>
    -  * argument to a TXT terminal, identified by its <txt_id> channel index argument.
    +  * This function attach a process, identified by the <process> argument to a TXT terminal,
    +  * identified by the <txt_id> channel index argument.
    +  * The process descriptor identified by the <process> argument must be in the owner cluster.
       * It insert the process descriptor in the xlist rooted in the TXT_RX device.
       * It is called by the process_reference_init() function.
  • trunk/kernel/kern/rpc.c

    r441 → r450

          &rpc_mapper_get_page_server,        // 25
          &rpc_vmm_create_vseg_server,        // 26
    -     &rpc_sched_display_server,          // 27
    +     &rpc_undefined,                     // 27   unused slot
          &rpc_vmm_set_cow_server,            // 28
          &rpc_vmm_display_server,            // 29
    …

      /////////////////////////////////////////////////////////////////////////////////////////
    - // [27]          Marshaling functions attached to RPC_SCHED_DISPLAY (blocking)
    - /////////////////////////////////////////////////////////////////////////////////////////
    -
    - ////////////////////////////////////////////////////////
    - void rpc_sched_display_client( cxy_t              cxy,
    -                                lid_t              lid)
    - {
    -     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    -
    -     // initialise RPC descriptor header
    -     rpc_desc_t  rpc;
    -     rpc.index    = RPC_SCHED_DISPLAY;
    -     rpc.blocking = true;
    -     rpc.responses = 1;
    -
    -     // set input arguments in RPC descriptor
    -     rpc.args[0] = (uint64_t)lid;
    -
    -     // register RPC request in remote RPC fifo
    -     rpc_send( cxy , &rpc );
    -
    - }
    -
    - //////////////////////////////////////////
    - void rpc_sched_display_server( xptr_t xp )
    - {
    -     // get client cluster identifier and pointer on RPC descriptor
    -     cxy_t        cxy  = GET_CXY( xp );
    -     rpc_desc_t * desc = GET_PTR( xp );
    -
    -     // get input arguments from client RPC descriptor
    -     lid_t lid = (lid_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
    -
    -     // call local kernel function
    -     sched_display( lid );
    -
    - }
    + // [27]         undefined slot
    + /////////////////////////////////////////////////////////////////////////////////////////

      /////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/kern/rpc.h

    r438 → r450

          RPC_MAPPER_GET_PAGE        = 25,
          RPC_VMM_CREATE_VSEG        = 26,
    -     RPC_SCHED_DISPLAY          = 27,
    +     RPC_UNDEFINED_27           = 27,
          RPC_VMM_SET_COW            = 28,
          RPC_VMM_DISPLAY            = 29,
    …

      /***********************************************************************************
    -  * [27] The RPC_SCHED_DISPLAY allows a client thread to request the display
    -  * of a remote scheduler, identified by the <lid> argument.
    -  ***********************************************************************************
    -  * @ cxy         : server cluster identifier.
    -  * @ lid         : [in]  local index of target core in client cluster.
    -  **********************************************************************************/
    - void rpc_sched_display_client( cxy_t              cxy,
    -                                lid_t              lid );
    -
    - void rpc_sched_display_server( xptr_t xp );
    +  * [27] undefined slot
    +  **********************************************************************************/

      /***********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r445 → r450

          list_entry_t * root;
          bool_t         done;
    +     uint32_t       count;

          // take lock protecting sheduler lists
    …
              root    = &sched->k_root;
              last    = sched->k_last;
    +         done    = false;
    +         count   = 0;
              current = last;
    -         done    = false;

              while( done == false )
              {
    +             assert( (count < sched->k_threads_nr), __FUNCTION__, "bad kernel threads list" );
    +
                  // get next entry in kernel list
                  current = current->next;
    …
                  // skip the root that does not contain a thread
                  if( current == root ) continue;
    +             else                  count++;

                  // get thread pointer for this entry
                  thread = LIST_ELEMENT( current , thread_t , sched_list );

    -             // select kernel thread if non blocked and non IDLE
    +             // select kernel thread if non blocked and non THREAD_IDLE
                  if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
                  {
    …
                  }
              } // end loop on kernel threads
    -     } // end if kernel threads
    +     } // end kernel threads

          // second : scan the user threads list if not empty
    …
              root    = &sched->u_root;
              last    = sched->u_last;
    +         done    = false;
    +         count   = 0;
              current = last;
    -         done    = false;

              while( done == false )
              {
    +             assert( (count < sched->u_threads_nr), __FUNCTION__, "bad user threads list" );
    +
                  // get next entry in user list
                  current = current->next;
    …
                  // skip the root that does not contain a thread
                  if( current == root ) continue;
    +             else                  count++;

                  // get thread pointer for this entry
                  thread = LIST_ELEMENT( current , thread_t , sched_list );

    -             // return thread if non blocked
    +             // select thread if non blocked
                  if( thread->blocked == 0 )
                  {
    …
                  }
              } // end loop on user threads
    -     } // end if user threads
    +     } // end user threads

          // third : return idle thread if no other runnable thread
    …
                  sched->u_threads_nr = threads_nr - 1;
                  list_unlink( &thread->sched_list );
    -             if( threads_nr == 1 ) sched->u_last = NULL;
    -
    -             // delete thread
    +             if( sched->u_last == &thread->sched_list )
    +             {
    +                 if( threads_nr == 1 )
    +                 {
    +                     sched->u_last = NULL;
    +                 }
    +                 else if( sched->u_root.next == &thread->sched_list )
    +                 {
    +                     sched->u_last = sched->u_root.pred;
    +                 }
    +                 else
    +                 {
    +                     sched->u_last = sched->u_root.next;
    +                 }
    +             }
    +
    +             // delete thread descriptor
                  last_thread = thread_destroy( thread );
    …
      __FUNCTION__ , process->pid , local_cxy , cycle );
      #endif
    -
                  }
              }
    …
      }  // end sched_display()

    + /////////////////////////////////////
    + void sched_remote_display( cxy_t cxy,
    +                            lid_t lid )
    + {
    +     thread_t     * thread;
    +     uint32_t       save_sr;
    +
    +     // check cxy
    +     bool_t undefined = cluster_is_undefined( cxy );
    +     assert( (undefined == false), __FUNCTION__, "illegal cluster %x\n", cxy );
    +
    +     // check lid
    +     uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
    +     assert( (lid < cores), __FUNCTION__, "illegal core index %d\n", lid);
    +
    +     // get local pointer on target scheduler
    +     core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    +     scheduler_t * sched = &core->scheduler;
    +
    +     // get local pointer on current thread in target scheduler
    +     thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );
    +
    +     // get local pointer on the first kernel and user threads list_entry
    +     list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    +     list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );
    +
    +     // get pointers on TXT0 chdev
    +     xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    +     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    +     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    +
    +     // get extended pointer on remote TXT0 chdev lock
    +     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    +
    +     // get TXT0 lock in busy waiting mode
    +     remote_spinlock_lock_busy( lock_xp , &save_sr );
    +
    +     // display header
    +     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    +     cxy , lid, current, (uint32_t)hal_get_cycles() );
    +
    +     // display kernel threads
    +     while( k_entry != &sched->k_root )
    +     {
    +         // get local pointer on kernel_thread
    +         thread = LIST_ELEMENT( k_entry , thread_t , sched_list );
    +
    +         // get relevant thead info
    +         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    +         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    +         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    +         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
    +         process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    +         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
    +
    +         // display thread info
    +         if (type == THREAD_DEV)
    +         {
    +             char      name[16];
    +             chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
    +             hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , &chdev->name ) );
    +
    +             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
    +             thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
    +         }
    +         else
    +         {
    +             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
    +             thread_type_str( type ), pid, trdid, thread, blocked, flags );
    +         }
    +
    +         // get next remote kernel thread list_entry
    +         k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    +     }
    +
    +     // display user threads
    +     while( u_entry != &sched->u_root )
    +     {
    +         // get local pointer on user_thread
    +         thread = LIST_ELEMENT( u_entry , thread_t , sched_list );
    +
    +         // get relevant thead info
    +         thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
    +         trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
    +         uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
    +         uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
    +         process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
    +         pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
    +
    +         nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
    +         thread_type_str( type ), pid, trdid, thread, blocked, flags );
    +
    +         // get next user thread list_entry
    +         u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    +     }
    +
    +     // release TXT0 lock
    +     remote_spinlock_unlock_busy( lock_xp , save_sr );
    +
    + }  // end sched_remote_display()
    +
  • trunk/kernel/kern/scheduler.h

    r443 → r450

      /*********************************************************************************************
    -  * This function display the internal state of the local core identified by its <lid>.
    +  * This debug function displays on TXT0 the internal state of a local scheduler,
    +  * identified by the core local index <lid>.
       *********************************************************************************************
       * @ lid      : local index of target core.
    …
      void sched_display( lid_t lid );

    + /*********************************************************************************************
    +  * This debug function displays on TXT0 the internal state of a scheduler,
    +  * identified by the target cluster identifier <cxy> and the core local index <lid>.
    +  * It can be called by a thread running in any cluster, as it uses remote accesses,
    +  * to scan the scheduler local lists of threads.
    +  *********************************************************************************************
    +  * @ cxy      : target cluster identifier
    +  * @ lid      : local index of target core.
    +  ********************************************************************************************/
    + void sched_remote_display( cxy_t  cxy,
    +                            lid_t  lid );

      #endif  /* _SCHEDULER_H_ */
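
Because sched_remote_display() only performs remote read accesses, a thread can call it directly instead of going through an RPC, which is why the RPC_SCHED_DISPLAY slot is retired in rpc.c / rpc.h above. A minimal usage sketch, assuming the caller has already validated cxy and lid the way sys_display.c does below (the function also asserts them itself), and that the local case uses sched_display():

    // Display the state of the scheduler of core <lid> in cluster <cxy> on TXT0.
    if( cxy == local_cxy ) sched_display( lid );                // local core: plain accesses
    else                   sched_remote_display( cxy , lid );   // remote core: hal_remote_* accesses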
  • trunk/kernel/kern/thread.c

    r446 → r450

      if( DEBUG_THREAD_DESTROY < cycle )
      printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
    - __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
    + __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, cycle );
      #endif

    …
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_DESTROY < cycle )
    - printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n",
    - __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
    + printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / last %d / cycle %d\n",
    + __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, last_thread / cycle );
      #endif

    …
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_BLOCK < cycle )
    - printk("\n[@@@] %s : thread %x  in cxy %x blocked thread %x in cxy %x / cause %x / cycle %d\n",
    + printk("\n[DBG] %s : thread %x  in cxy %x blocked thread %x in cxy %x / cause %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , ptr , cxy , cause , cycle );
    - #endif
    -
    - #if (DEBUG_THREAD_BLOCK & 1)
    - if( DEBUG_THREAD_BLOCK < cycle )
    - {
    -     if( cxy == local_cxy)
    -     {
    -         sched_display( ptr->core->lid );
    -     }
    -     else
    -     {
    -         core_t * core = hal_remote_lpt( XPTR( cxy , &ptr->core ) );
    -         lid_t    lid  = hal_remote_lw ( XPTR( cxy , &core->lid ) );
    -         rpc_sched_display_client( cxy , lid );
    -     }
    - }
      #endif

    …
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_THREAD_BLOCK < cycle )
    - printk("\n[@@@] %s : thread %x  in cxy %x unblocked thread %x in cxy %x / cause %x / cycle %d\n",
    + printk("\n[DBG] %s : thread %x  in cxy %x unblocked thread %x in cxy %x / cause %x / cycle %d\n",
      __FUNCTION__ , CURRENT_THREAD , local_cxy , ptr , cxy , cause , cycle );
    - #endif
    -
    - #if (DEBUG_THREAD_BLOCK & 1)
    - if( DEBUG_THREAD_BLOCK < cycle )
    - {
    -     if( cxy == local_cxy)
    -     {
    -         sched_display( ptr->core->lid );
    -     }
    -     else
    -     {
    -         core_t * core = hal_remote_lpt( XPTR( cxy , &ptr->core ) );
    -         lid_t    lid  = hal_remote_lw ( XPTR( cxy , &core->lid ) );
    -         rpc_sched_display_client( cxy , lid );
    -     }
    - }
      #endif

  • trunk/kernel/kernel_config.h

    r447 → r450

      #define DEBUG_PROCESS_REFERENCE_INIT   0
      #define DEBUG_PROCESS_SIGACTION        0
    - #define DEBUG_PROCESS_TXT              2
    + #define DEBUG_PROCESS_TXT              0
      #define DEBUG_PROCESS_ZERO_CREATE      0

    …
      #define DEBUG_RPC_VMM_GET_VSEG         0

    - #define DEBUG_SCHED_HANDLE_SIGNALS     1
    - #define DEBUG_SCHED_YIELD              1    // must be activated by the trace() syscall
    + #define DEBUG_SCHED_HANDLE_SIGNALS     2
    + #define DEBUG_SCHED_YIELD              2    // must be activated by the trace() syscall

      #define DEBUG_SYSCALLS_ERROR           2
    …
      #define DEBUG_SYS_EXIT                 1
      #define DEBUG_SYS_FG                   0
    - #define DEBUG_SYS_FORK                 1
    + #define DEBUG_SYS_FORK                 0
      #define DEBUG_SYS_GET_CONFIG           0
      #define DEBUG_SYS_ISATTY               0
      #define DEBUG_SYS_KILL                 0
      #define DEBUG_SYS_MMAP                 0
    - #define DEBUG_SYS_READ                 2
    + #define DEBUG_SYS_READ                 0
      #define DEBUG_SYS_THREAD_CANCEL        0
      #define DEBUG_SYS_THREAD_CREATE        0
    …
      #define DEBUG_SYS_TRACE                0
      #define DEBUG_SYS_WAIT                 0
    - #define DEBUG_SYS_WRITE                2
    + #define DEBUG_SYS_WRITE                0

      #define DEBUG_SPINLOCKS                0
  • trunk/kernel/libk/list.h

    r440 → r450

      /***************************************************************************
       * This structure defines a Double Circular Linked List entry.
    -  * Note : The list root is an extra list-entry_t, that is NOT part of the
    -  *        set of linked elements.
    +  * Note : The list root is an extra list-entry_t, that is NOT part
    +  *            of the set of linked elements.
       **************************************************************************/

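The scheduler hunks above scan these lists with the usual root-sentinel pattern. A minimal sketch, assuming the list_entry_t layout implied by the uses of .next, .pred and LIST_ELEMENT() elsewhere in this changeset:

    typedef struct list_entry_s
    {
        struct list_entry_s * next;   // next entry in the circular list
        struct list_entry_s * pred;   // previous entry in the circular list
    }
    list_entry_t;

    // Hypothetical helper showing the scan pattern: the root is a sentinel that
    // carries no payload, so the scan starts at root->next and stops back at root.
    static uint32_t list_count( list_entry_t * root )
    {
        uint32_t       count = 0;
        list_entry_t * entry;
        for( entry = root->next ; entry != root ; entry = entry->next ) count++;
        return count;
    }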
  • trunk/kernel/syscalls/sys_display.c

    r445 → r450

              else
              {
    -             rpc_sched_display_client( cxy , lid );
    +             sched_remote_display( cxy , lid );
              }
          }
  • trunk/kernel/syscalls/sys_read.c

    r446 → r450

              }

    - printk("\n###### in %s : thread %x in process %x got TXT_RX ownership\n",
    - __FUNCTION__, this->trdid, process->pid );
    -
              // move count bytes from device
              nbytes = devfs_user_move( true,             // from device to buffer