source: trunk/kernel/kern/chdev.c @ 450

Last change on this file since 450 was 450, checked in by alain, 6 years ago

Fix a bug in function sched_handle_signal():
When the deleted user thread is the last executed thread,
the sched->u_last field must be updated to point to another user thread.

File size: 18.8 KB
/*
 * chdev.c - channel device descriptor operations implementation.
 *
 * Author   Alain Greiner   (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <printk.h>
#include <boot_info.h>
#include <xlist.h>
#include <kmem.h>
#include <scheduler.h>
#include <thread.h>
#include <rpc.h>
#include <chdev.h>
#include <devfs.h>


extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c

#if (DEBUG_SYS_READ & 1)
extern uint32_t enter_chdev_cmd_read;
extern uint32_t exit_chdev_cmd_read;
extern uint32_t enter_chdev_server_read;
extern uint32_t exit_chdev_server_read;
#endif

#if (DEBUG_SYS_WRITE & 1)
extern uint32_t enter_chdev_cmd_write;
extern uint32_t exit_chdev_cmd_write;
extern uint32_t enter_chdev_server_write;
extern uint32_t exit_chdev_server_write;
#endif

////////////////////////////////////////////
char * chdev_func_str( uint32_t func_type )
{
    if     ( func_type == DEV_FUNC_RAM ) return "RAM";
    else if( func_type == DEV_FUNC_ROM ) return "ROM";
    else if( func_type == DEV_FUNC_FBF ) return "FBF";
    else if( func_type == DEV_FUNC_IOB ) return "IOB";
    else if( func_type == DEV_FUNC_IOC ) return "IOC";
    else if( func_type == DEV_FUNC_MMC ) return "MMC";
    else if( func_type == DEV_FUNC_DMA ) return "DMA";
    else if( func_type == DEV_FUNC_NIC ) return "NIC";
    else if( func_type == DEV_FUNC_TIM ) return "TIM";
    else if( func_type == DEV_FUNC_TXT ) return "TXT";
    else if( func_type == DEV_FUNC_ICU ) return "ICU";
    else if( func_type == DEV_FUNC_PIC ) return "PIC";
    else                                 return "undefined";
}

/////////////////////////////////////////
chdev_t * chdev_create( uint32_t    func,
                        uint32_t    impl,
                        uint32_t    channel,
                        uint32_t    is_rx,
                        xptr_t      base )
{
    chdev_t    * chdev;
    kmem_req_t   req;

    // allocate memory for chdev
    req.type   = KMEM_DEVICE;
    req.flags  = AF_ZERO;
    chdev      = (chdev_t *)kmem_alloc( &req );

    if( chdev == NULL ) return NULL;

    // initialize waiting threads queue and associated lock
    remote_spinlock_init( XPTR( local_cxy , &chdev->wait_lock ) );
    xlist_root_init( XPTR( local_cxy , &chdev->wait_root ) );

    // initialize attributes
    chdev->func    =  func;
    chdev->impl    =  impl;
    chdev->channel =  channel;
    chdev->is_rx   =  is_rx;
    chdev->base    =  base;

    return chdev;

}  // end chdev_create()

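
/*
 * Usage sketch for chdev_create(): it is normally called once per device
 * channel, during kernel initialization, in the cluster chosen to host the
 * chdev descriptor. This is a minimal illustration only, assuming that impl,
 * channel and base have been extracted from boot_info; the actual creation
 * and registration code lives in kernel_init.c and may differ in detail.
 *
 *   chdev_t * txt_tx;
 *   txt_tx = chdev_create( DEV_FUNC_TXT,   // functional type
 *                          impl,           // implementation type (from boot_info)
 *                          channel,        // channel index
 *                          0,              // is_rx == 0 : TX direction
 *                          base );         // extended pointer on segment base
 *   if( txt_tx == NULL )
 *   {
 *       // report fatal error : no memory for chdev descriptor
 *   }
 *
 * The returned descriptor is then registered in the global chdev_dir
 * directory, so that any cluster can reach it through an extended pointer.
 */
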
///////////////////////////////////
void chdev_print( chdev_t * chdev )
{
    printk("\n - func      = %s"
           "\n - channel   = %d"
           "\n - base      = %l"
           "\n - cmd       = %x"
           "\n - isr       = %x"
           "\n - chdev     = %x\n",
           chdev_func_str(chdev->func),
           chdev->channel,
           chdev->base,
           chdev->cmd,
           chdev->isr,
           chdev );
}

//////////////////////////////////////////////////
void chdev_register_command( xptr_t     chdev_xp )
{
    thread_t * server_ptr;    // local pointer on server thread associated to chdev
    xptr_t     server_xp;     // extended pointer on server thread
    core_t   * core_ptr;      // local pointer on core running the server thread
    uint32_t   lid;           // core running the server thread local index
    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev queue
    uint32_t   different;     // non zero if server thread core != client thread core
    uint32_t   save_sr;       // for critical section

#if (DEBUG_SYS_READ & 1)
enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

    thread_t * this = CURRENT_THREAD;

    // get chdev cluster and local pointer
    cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    chdev_t * chdev_ptr = GET_PTR( chdev_xp );

    // get local and extended pointers on server thread
    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server ) );
    server_xp  = XPTR( chdev_cxy , server_ptr );

    // get local pointer on core running the server thread
    core_ptr   = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );

    // get server core local index
    lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );

#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
#endif

#if DEBUG_CHDEV_CMD_RX
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[DBG] %s : client_thread %x (%s) enter for RX / server = %x / cycle %d\n",
__FUNCTION__, this, thread_type_str(this->type) , server_ptr, rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[DBG] %s : client_thread %x (%s) enter for TX / server = %x / cycle %d\n",
__FUNCTION__, this, thread_type_str(this->type) , server_ptr, tx_cycle );
#endif

    // build extended pointer on client thread xlist
    xptr_t  list_xp    = XPTR( local_cxy , &this->wait_list );

    // build extended pointer on chdev waiting queue root
    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // build extended pointer on server thread blocked state
    xptr_t  blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );

    // build extended pointer on lock protecting chdev waiting queue
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

    // critical section for the following sequence:
    // (1) take the lock protecting waiting queue
    // (2) block the client thread
    // (3) unblock the server thread if required
    // (4) register client thread in server queue
    // (5) send IPI to force server scheduling
    // (6) release the lock protecting waiting queue
    // (7) deschedule
    // ... in this order

    // enter critical section
    hal_disable_irq( &save_sr );

    // take the lock protecting chdev waiting queue
    remote_spinlock_lock( lock_xp );

    // block current thread
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[DBG] in %s : client thread %x blocked\n", __FUNCTION__, this );
#endif

    // unblock server thread if required
    if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE )
    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
{
printk("\n[DBG] in %s : server thread %x unblocked\n", __FUNCTION__, server_ptr );
chdev_queue_display( chdev_xp );
}
#endif

    // register client thread in waiting queue
    xlist_add_last( root_xp , list_xp );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
{
printk("\n[DBG] in %s : thread %x registered write request in chdev\n", __FUNCTION__, this );
chdev_queue_display( chdev_xp );
}
#endif

    // send IPI to core running the server thread when server != client
    different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
    if( different )
    {
        dev_pic_send_ipi( chdev_cxy , lid );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[DBG] in %s : client thread %x sent IPI to server thread %x\n",
__FUNCTION__, this, server_ptr );
#endif

    }

    // release lock
    remote_spinlock_unlock( lock_xp );

    // deschedule
    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
    sched_yield("blocked on I/O");

    // exit critical section
    hal_restore_irq( save_sr );

#if DEBUG_CHDEV_CMD_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[DBG] %s : client_thread %x (%s) exit for RX / cycle %d\n",
__FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[DBG] %s : client_thread %x (%s) exit for TX / cycle %d\n",
__FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

}  // end chdev_register_command()

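
/*
 * Usage sketch for chdev_register_command(): a device API function running in
 * the client thread is expected to (1) fill the command descriptor embedded
 * in its thread descriptor, (2) get the extended pointer on the relevant
 * chdev from the chdev_dir directory, and (3) call this function, which
 * blocks and deschedules the client until the server thread has completed
 * the I/O operation. This is a minimal illustration only, assuming a TX
 * access on a TXT channel; the command field names used below are
 * placeholders, as the real fields are defined by the TXT device API and
 * may differ.
 *
 *   thread_t * this = CURRENT_THREAD;
 *
 *   // fill command descriptor (hypothetical field names)
 *   this->txt_cmd.dev_xp = chdev_dir.txt_tx[channel];
 *   this->txt_cmd.buffer = buffer;
 *   this->txt_cmd.count  = count;
 *
 *   // register command in chdev waiting queue, block and deschedule
 *   chdev_register_command( chdev_dir.txt_tx[channel] );
 *
 *   // here the I/O operation has been completed by the server thread :
 *   // the client can check the error code returned by the driver
 *   error = this->txt_cmd.error;
 */
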
///////////////////////////////////////////////
void chdev_sequencial_server( chdev_t * chdev )
{
    xptr_t          client_xp;    // extended pointer on waiting thread
    cxy_t           client_cxy;   // cluster of client thread
    thread_t      * client_ptr;   // local pointer on client thread
    thread_t      * server;       // local pointer on server thread
    xptr_t          root_xp;      // extended pointer on device waiting queue root
    xptr_t          lock_xp;      // extended pointer on lock protecting chdev queue

    server = CURRENT_THREAD;

    // get root and lock on command queue
    root_xp = XPTR( local_cxy , &chdev->wait_root );
    lock_xp = XPTR( local_cxy , &chdev->wait_lock );

    // This infinite loop is executed by the DEV thread
    // to handle commands registered in the chdev queue.
    while( 1 )
    {
        // get the lock protecting the waiting queue
        remote_spinlock_lock( lock_xp );

        // check waiting queue state
        if( xlist_is_empty( root_xp ) ) // waiting queue empty
        {
            // release lock
            remote_spinlock_unlock( lock_xp );

            // block
            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );

            // deschedule
            assert( thread_can_yield( server ) , __FUNCTION__ , "illegal sched_yield\n" );
            sched_yield("I/O queue empty");
        }
        else                            // waiting queue not empty
        {
            // get extended pointer on first client thread
            client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );

            // get client thread cluster and local pointer
            client_cxy = GET_CXY( client_xp );
            client_ptr = GET_PTR( client_xp );

            // remove this first client thread from waiting queue
            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );

            // release lock
            remote_spinlock_unlock( lock_xp );

#if DEBUG_CHDEV_SERVER_RX
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
__FUNCTION__ , server , client_ptr , rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
__FUNCTION__ , server , client_ptr , tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
enter_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

            // call driver command function to execute I/O operation
            chdev->cmd( client_xp );

            // unblock client thread
            thread_unblock( client_xp , THREAD_BLOCKED_IO );

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
__FUNCTION__ , server , client_ptr , rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
__FUNCTION__ , server , client_ptr , tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

        }
    }  // end while
}  // end chdev_sequencial_server()

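
/*
 * Launch sketch: chdev_sequencial_server() is the entry point of the
 * dedicated DEV server thread attached to each chdev. The snippet below is
 * a minimal illustration of how such a thread could be created, on a given
 * core of the cluster containing the chdev; the helper name and signature
 * shown here are assumptions, and the actual creation code (thread
 * attributes, core selection, binding of chdev->server) belongs to the
 * kernel initialization path and may differ.
 *
 *   thread_t * server;
 *   error_t    error;
 *
 *   error = thread_kernel_create( &server,                   // new thread
 *                                 THREAD_DEV,                // thread type
 *                                 &chdev_sequencial_server,  // entry function
 *                                 chdev,                     // argument
 *                                 lid );                     // target core index
 *
 *   if( error == 0 ) chdev->server = server;  // link server thread to chdev
 */
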
////////////////////////////////////////
xptr_t chdev_from_file( xptr_t file_xp )
{
    cxy_t         file_cxy;
    vfs_file_t  * file_ptr;
    uint32_t      inode_type;
    vfs_inode_t * inode_ptr;
    chdev_t     * chdev_ptr;

    assert( (file_xp != XPTR_NULL) , __FUNCTION__,
    "file_xp == XPTR_NULL\n" );

    // get cluster and local pointer on remote file descriptor
    // associated inode and chdev are stored in same cluster as the file desc.
    file_cxy  = GET_CXY( file_xp );
    file_ptr  = (vfs_file_t *)GET_PTR( file_xp );

    // get inode type from file descriptor
    inode_type = hal_remote_lw( XPTR( file_cxy , &file_ptr->type ) );
    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );

    assert( (inode_type == INODE_TYPE_DEV) , __FUNCTION__ ,
    "inode type %d is not INODE_TYPE_DEV\n", inode_type );

    // get chdev local pointer from inode extension
    chdev_ptr = (chdev_t *)hal_remote_lpt( XPTR( file_cxy , &inode_ptr->extend ) );

    return XPTR( file_cxy , chdev_ptr );

}  // end chdev_from_file()

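
/*
 * Usage sketch for chdev_from_file(): this helper is intended for VFS code
 * handling read() / write() on pseudo-files of type INODE_TYPE_DEV. This is
 * a minimal illustration, assuming file_xp is the extended pointer on an
 * already opened device file descriptor (the surrounding syscall code is
 * omitted and the variable names are placeholders):
 *
 *   xptr_t    chdev_xp;
 *   cxy_t     chdev_cxy;
 *   chdev_t * chdev_ptr;
 *
 *   chdev_xp  = chdev_from_file( file_xp );   // chdev associated to the file
 *   chdev_cxy = GET_CXY( chdev_xp );          // cluster containing the chdev
 *   chdev_ptr = GET_PTR( chdev_xp );          // local pointer in that cluster
 */
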
////////////////////////
void chdev_dir_display()
{
    uint32_t  i;
    cxy_t     cxy;
    chdev_t * ptr;
    uint32_t  base;
    reg_t     save_sr;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // header
    nolock_printk("\n***** external chdevs directory *****\n");

    // IOB
    cxy  = GET_CXY( chdev_dir.iob );
    ptr  = GET_PTR( chdev_dir.iob );
    base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
    nolock_printk("  - iob       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);

    // PIC
    cxy  = GET_CXY( chdev_dir.pic );
    ptr  = GET_PTR( chdev_dir.pic );
    base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
    nolock_printk("  - pic       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);

    // TXT
    for( i = 0 ; i < LOCAL_CLUSTER->nb_txt_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.txt_rx[i] );
        ptr = GET_PTR( chdev_dir.txt_rx[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.txt_tx[i] );
        ptr = GET_PTR( chdev_dir.txt_tx[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // IOC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_ioc_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.ioc[i] );
        ptr = GET_PTR( chdev_dir.ioc[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - ioc[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // FBF
    for( i = 0 ; i < LOCAL_CLUSTER->nb_fbf_channels ; i++ )
    {
        cxy  = GET_CXY( chdev_dir.fbf[i] );
        ptr  = GET_PTR( chdev_dir.fbf[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - fbf[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // NIC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_nic_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.nic_rx[i] );
        ptr = GET_PTR( chdev_dir.nic_rx[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.nic_tx[i] );
        ptr = GET_PTR( chdev_dir.nic_tx[i] );
        base = (uint32_t)hal_remote_lwd( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // release lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end chdev_dir_display()

///////////////////////////////////////////
void chdev_queue_display( xptr_t chdev_xp )
{
    cxy_t       chdev_cxy;          // chdev cluster
    chdev_t   * chdev_ptr;          // chdev local pointer
    xptr_t      root_xp;            // extended pointer on waiting queue root
    char        name[16];           // local copy of chdev name
    xptr_t      iter_xp;            // extended pointer on xlist_t field in waiting thread
    xptr_t      thread_xp;          // extended pointer on thread registered in queue
    cxy_t       thread_cxy;         // cluster identifier for waiting thread
    thread_t  * thread_ptr;         // local pointer on waiting thread
    trdid_t     trdid;              // waiting thread identifier
    process_t * process;            // waiting thread process descriptor
    pid_t       pid;                // waiting thread process identifier

    // get cluster and local pointer on chdev
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root of requests queue
    root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // get chdev name
    hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( chdev_cxy , chdev_ptr->name ) );

    // check queue empty
    if( xlist_is_empty( root_xp ) )
    {
        printk("\n***** Waiting queue empty for chdev %s\n", name );
    }
    else
    {
        printk("\n***** Waiting queue for chdev %s\n", name );

        // scan the waiting queue
        XLIST_FOREACH( root_xp , iter_xp )
        {
            thread_xp  = XLIST_ELEMENT( iter_xp , thread_t , wait_list );
            thread_cxy = GET_CXY( thread_xp );
            thread_ptr = GET_PTR( thread_xp );
            trdid      = hal_remote_lw ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
            process    = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
            pid        = hal_remote_lw ( XPTR( thread_cxy , &process->pid        ) );

            printk("- thread %X / cluster %X / trdid %X / pid %X\n",
            thread_ptr, thread_cxy, trdid, pid );
        }
    }
}  // end chdev_queue_display()
