source: trunk/kernel/kern/chdev.c @ 642

Last change on this file since 642 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This was done to cure a deadlock in the case of concurrent page faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.
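
For reference, the chdev_create() function below uses the KCM-based allocation
interface introduced by this change. A minimal sketch, taken from that function
(kmem_req_t, KMEM_KCM, and kmem_alloc() appear in this file; error handling is elided):

    kmem_req_t req;
    req.type  = KMEM_KCM;                      // per-cluster kernel cache allocator
    req.order = bits_log2( sizeof(chdev_t) );  // log2 of the requested object size
    req.flags = AF_ZERO | AF_KERNEL;           // zeroed kernel memory
    chdev_t * chdev = kmem_alloc( &req );      // returns NULL on failure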

File size: 21.7 KB
/*
 * chdev.c - channel device descriptor operations implementation.
 *
 * Authors  Alain Greiner   (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <printk.h>
#include <boot_info.h>
#include <xlist.h>
#include <kmem.h>
#include <scheduler.h>
#include <thread.h>
#include <rpc.h>
#include <chdev.h>
#include <devfs.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;         // allocated in kernel_init.c


#if (DEBUG_SYS_READ & 1)
extern uint32_t enter_chdev_cmd_read;
extern uint32_t exit_chdev_cmd_read;
extern uint32_t enter_chdev_server_read;
extern uint32_t exit_chdev_server_read;
#endif

#if (DEBUG_SYS_WRITE & 1)
extern uint32_t enter_chdev_cmd_write;
extern uint32_t exit_chdev_cmd_write;
extern uint32_t enter_chdev_server_write;
extern uint32_t exit_chdev_server_write;
#endif

////////////////////////////////////////////
char * chdev_func_str( uint32_t func_type )
{
    switch ( func_type )
    {
        case DEV_FUNC_RAM: return "RAM";
        case DEV_FUNC_ROM: return "ROM";
        case DEV_FUNC_FBF: return "FBF";
        case DEV_FUNC_IOB: return "IOB";
        case DEV_FUNC_IOC: return "IOC";
        case DEV_FUNC_MMC: return "MMC";
        case DEV_FUNC_DMA: return "DMA";
        case DEV_FUNC_NIC: return "NIC";
        case DEV_FUNC_TIM: return "TIM";
        case DEV_FUNC_TXT: return "TXT";
        case DEV_FUNC_ICU: return "ICU";
        case DEV_FUNC_PIC: return "PIC";
        default:           return "undefined";
    }
}

/////////////////////////////////////////
chdev_t * chdev_create( uint32_t    func,
                        uint32_t    impl,
                        uint32_t    channel,
                        uint32_t    is_rx,
                        xptr_t      base )
{
    chdev_t    * chdev;
    kmem_req_t   req;

    // allocate memory for chdev
    req.type   = KMEM_KCM;
    req.order  = bits_log2( sizeof(chdev_t) );
    req.flags  = AF_ZERO | AF_KERNEL;
    chdev      = kmem_alloc( &req );

    if( chdev == NULL ) return NULL;

    // initialize lock
    remote_busylock_init( XPTR( local_cxy , &chdev->wait_lock ), LOCK_CHDEV_QUEUE );

    // initialise waiting queue
    xlist_root_init( XPTR( local_cxy , &chdev->wait_root ) );

    // initialize attributes
    chdev->func    =  func;
    chdev->impl    =  impl;
    chdev->channel =  channel;
    chdev->is_rx   =  is_rx;
    chdev->base    =  base;

    return chdev;

}  // end chdev_create()
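
// Usage sketch (hypothetical values, not taken from kernel_init.c): a TXT_RX
// chdev for channel 0 could be created as follows, assuming <impl> and <base>
// describe the hardware device (e.g. as extracted from the boot_info):
//
//     chdev_t * txt_rx = chdev_create( DEV_FUNC_TXT,    // functional type
//                                      impl,            // implementation type
//                                      0,               // channel index
//                                      1,               // RX direction
//                                      base );          // segment base (xptr)
//     if( txt_rx == NULL ) { /* handle allocation failure */ }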

///////////////////////////////////
void chdev_print( chdev_t * chdev )
{
    printk("\n - func      = %s"
           "\n - channel   = %d"
           "\n - base      = %l"
           "\n - cmd       = %x"
           "\n - isr       = %x"
           "\n - chdev     = %x\n",
           chdev_func_str(chdev->func),
           chdev->channel,
           chdev->base,
           chdev->cmd,
           chdev->isr,
           chdev );
}

//////////////////////////////////////////////////
void chdev_register_command( xptr_t     chdev_xp )
{
    thread_t * server_ptr;    // local pointer on server thread associated to chdev
    xptr_t     server_xp;     // extended pointer on server thread
    core_t   * core_ptr;      // local pointer on core running the server thread
    uint32_t   server_lid;    // core running the server thread local index
    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev state

#if (DEBUG_SYS_READ & 1)
enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

    thread_t * this = CURRENT_THREAD;

    // get chdev cluster and local pointer
    cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    chdev_t * chdev_ptr = GET_PTR( chdev_xp );

    // check calling thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // get local and extended pointers on server thread
    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
    server_xp  = XPTR( chdev_cxy , server_ptr );

    // get local pointer on core running the server thread
    core_ptr   = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );

    // get server core local index
    server_lid = hal_remote_l32( XPTR( chdev_cxy , &core_ptr->lid ) );

#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
bool_t      is_rx        = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
trdid_t     server_trdid = hal_remote_l32( XPTR( chdev_cxy , &server_ptr->trdid ) );
process_t * process_ptr  = hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->process ) );
pid_t       server_pid   = hal_remote_l32( XPTR( chdev_cxy , &process_ptr->pid ) );
#endif

#if DEBUG_CHDEV_CMD_RX
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] enter for RX / server[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] enter for TX / server[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, tx_cycle );
#endif

    // build extended pointer on client thread xlist
    xptr_t  list_xp    = XPTR( local_cxy , &this->wait_list );

    // build extended pointer on chdev waiting queue root
    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // build extended pointer on lock protecting chdev waiting queue
    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

    // The following actions are executed in a critical section,
    // bracketed by the busylock acquire / release below:
    // (1) take the lock protecting the chdev state
    // (2) register the client thread in the server queue
    // (3) unblock the server thread and block the client thread
    // (4) send an IPI to force server scheduling
    // (5) release the lock protecting the waiting queue

    // 1. take the lock protecting chdev queue
    remote_busylock_acquire( lock_xp );

    // 2. register client thread in waiting queue
    xlist_add_last( root_xp , list_xp );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] registered write request in chdev\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx)  && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] registered read request in chdev\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // 3. client thread unblocks server thread and blocks itself
    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] unblocks server thread[%x,%x] and blocks itself\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] unblocks server thread[%x,%x] and blocks itself\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

    // 4. send IPI to core running the server thread when server core != client core
    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    {
        dev_pic_send_ipi( chdev_cxy , server_lid );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] sent IPI to TX server thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx)  && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] sent IPI to RX server thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

    }

    // 5. release lock protecting chdev queue
    remote_busylock_release( lock_xp );

    // deschedule
    sched_yield("blocked on I/O");

#if DEBUG_CHDEV_CMD_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] exit for RX / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] exit for TX / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

}  // end chdev_register_command()
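
// Client-side usage sketch (hypothetical, not a copy of an actual driver):
// a device API function is expected to
//   1. write its command arguments into the client thread descriptor,
//   2. call chdev_register_command( chdev_xp ), which blocks the client
//      until the server thread has executed the command and unblocked it,
//   3. read the command status from the thread descriptor after return.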

/////////////////////////////////////////
void chdev_server_func( chdev_t * chdev )
{
    xptr_t          client_xp;    // extended pointer on waiting thread
    cxy_t           client_cxy;   // cluster of client thread
    thread_t      * client_ptr;   // local pointer on client thread
    thread_t      * server;       // local pointer on server thread
    xptr_t          root_xp;      // extended pointer on device waiting queue root
    xptr_t          lock_xp;      // extended pointer on lock protecting chdev queue

    server = CURRENT_THREAD;

    // build extended pointer on root of client threads queue
    root_xp = XPTR( local_cxy , &chdev->wait_root );

    // build extended pointer on lock protecting client threads queue
    lock_xp = XPTR( local_cxy , &chdev->wait_lock );

    // This infinite loop is executed by the DEV thread
    // to handle commands registered in the chdev queue.
    while( 1 )
    {

#if( DEBUG_CHDEV_SERVER_RX || DEBUG_CHDEV_SERVER_TX )
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] checks TXT_RX channel %d / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, chdev->channel, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] server thread[%x,%x] checks TXT_TX channel %d / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, chdev->channel, tx_cycle );
#endif

        // check server thread can yield
        thread_assert_can_yield( server , __FUNCTION__ );

        // get the lock protecting the waiting queue
        remote_busylock_acquire( lock_xp );

        // check waiting queue state
        if( xlist_is_empty( root_xp ) ) // waiting queue empty
        {
            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] found RX queue empty => blocks / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] server thread[%x,%x] found TX queue empty => blocks / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, tx_cycle );
#endif
            // block
            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );

            // deschedule
            sched_yield("I/O queue empty");
        }
        else                            // waiting queue not empty
        {
            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

            // get extended pointer on first client thread
            client_xp = XLIST_FIRST( root_xp , thread_t , wait_list );

            // get client thread cluster and local pointer
            client_cxy = GET_CXY( client_xp );
            client_ptr = GET_PTR( client_xp );

#if( DEBUG_CHDEV_SERVER_TX || DEBUG_CHDEV_SERVER_RX )
process_t * process      = hal_remote_lpt( XPTR( client_cxy , &client_ptr->process ) );
pid_t       client_pid   = hal_remote_l32( XPTR( client_cxy , &process->pid ) );
trdid_t     client_trdid = hal_remote_l32( XPTR( client_cxy , &client_ptr->trdid ) );
#endif

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
enter_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

            // call the (blocking) driver command function
            // to launch I/O operation AND wait completion
            chdev->cmd( client_xp );

            // unblock client thread when driver returns
            thread_unblock( client_xp , THREAD_BLOCKED_IO );

            // get the lock protecting the waiting queue
            remote_busylock_acquire( lock_xp );

            // remove this client thread from chdev waiting queue
            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );

            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

        }
    }  // end while
}  // end chdev_server_func()
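
// Synchronization summary (derived from the two functions above):
// - the server thread blocks on THREAD_BLOCKED_IDLE when its queue is empty,
//   and is unblocked by a client in chdev_register_command();
// - the client thread blocks on THREAD_BLOCKED_IO after registering its
//   request, and is unblocked by the server once chdev->cmd() has completed.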

////////////////////////////////////////
xptr_t chdev_from_file( xptr_t file_xp )
{
    cxy_t         file_cxy;
    vfs_file_t  * file_ptr;
    uint32_t      inode_type;
    vfs_inode_t * inode_ptr;
    chdev_t     * chdev_ptr;

    assert( (file_xp != XPTR_NULL) ,
    "file_xp == XPTR_NULL\n" );

    // get cluster and local pointer on remote file descriptor
    // associated inode and chdev are stored in same cluster as the file desc.
    file_cxy  = GET_CXY( file_xp );
    file_ptr  = (vfs_file_t *)GET_PTR( file_xp );

    // get inode type from file descriptor
    inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );

    assert( (inode_type == INODE_TYPE_DEV) ,
    "inode type %d is not INODE_TYPE_DEV\n", inode_type );

    // get chdev local pointer from inode extension
    chdev_ptr = (chdev_t *)hal_remote_lpt( XPTR( file_cxy , &inode_ptr->extend ) );

    return XPTR( file_cxy , chdev_ptr );

}  // end chdev_from_file()
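
// Usage sketch (hypothetical caller, e.g. the device-file branch of a read
// syscall): the returned extended pointer can be decomposed with the same
// GET_CXY / GET_PTR macros used throughout this file:
//
//     xptr_t    chdev_xp  = chdev_from_file( file_xp );
//     cxy_t     chdev_cxy = GET_CXY( chdev_xp );
//     chdev_t * chdev_ptr = GET_PTR( chdev_xp );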

//////////////////////////////
void chdev_dir_display( void )
{
    uint32_t  i;
    cxy_t     cxy;
    chdev_t * ptr;
    uint32_t  base;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // header
    nolock_printk("\n***** external chdevs directory *****\n");

    // IOB
    if (chdev_dir.iob != XPTR_NULL )
    {
        cxy  = GET_CXY( chdev_dir.iob );
        ptr  = GET_PTR( chdev_dir.iob );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - iob       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    }

    // PIC
    cxy  = GET_CXY( chdev_dir.pic );
    ptr  = GET_PTR( chdev_dir.pic );
    base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    nolock_printk("  - pic       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);

    // TXT
    for( i = 0 ; i < LOCAL_CLUSTER->nb_txt_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.txt_rx[i] );
        ptr = GET_PTR( chdev_dir.txt_rx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.txt_tx[i] );
        ptr = GET_PTR( chdev_dir.txt_tx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // IOC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_ioc_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.ioc[i] );
        ptr = GET_PTR( chdev_dir.ioc[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - ioc[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // FBF
    for( i = 0 ; i < LOCAL_CLUSTER->nb_fbf_channels ; i++ )
    {
        cxy  = GET_CXY( chdev_dir.fbf[i] );
        ptr  = GET_PTR( chdev_dir.fbf[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - fbf[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // NIC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_nic_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.nic_rx[i] );
        ptr = GET_PTR( chdev_dir.nic_rx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.nic_tx[i] );
        ptr = GET_PTR( chdev_dir.nic_tx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // release lock
    remote_busylock_release( lock_xp );

}  // end chdev_dir_display()
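
// Note on the display functions: both chdev_dir_display() above and
// chdev_queue_display() below take the TXT0 TX busylock and use
// nolock_printk(), so that the whole dump reaches the kernel terminal
// without being interleaved with output from other threads.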

///////////////////////////////////////////
void chdev_queue_display( xptr_t chdev_xp )
{
    cxy_t       chdev_cxy;          // chdev cluster
    chdev_t   * chdev_ptr;          // chdev local pointer
    xptr_t      root_xp;            // extended pointer on waiting queue root
    char        name[16];           // local copy of chdev name
    xptr_t      iter_xp;            // extended pointer on xlist_t field in waiting thread
    xptr_t      thread_xp;          // extended pointer on thread registered in queue
    cxy_t       thread_cxy;         // cluster identifier for waiting thread
    thread_t  * thread_ptr;         // local pointer on waiting thread
    trdid_t     trdid;              // waiting thread identifier
    process_t * process;            // waiting thread process descriptor
    pid_t       pid;                // waiting thread process identifier

    // get cluster and local pointer on chdev
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root of requests queue
    root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // get chdev name
    hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( chdev_cxy , chdev_ptr->name ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // check queue empty
    if( xlist_is_empty( root_xp ) )
    {
        nolock_printk("\n***** Waiting queue empty for chdev %s\n", name );
    }
    else
    {
        nolock_printk("\n***** Waiting queue for chdev %s\n", name );

        // scan the waiting queue
        XLIST_FOREACH( root_xp , iter_xp )
        {
            thread_xp  = XLIST_ELEMENT( iter_xp , thread_t , wait_list );
            thread_cxy = GET_CXY( thread_xp );
            thread_ptr = GET_PTR( thread_xp );
            trdid      = hal_remote_l32 ( XPTR( thread_cxy , &thread_ptr->trdid   ) );
            process    = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
            pid        = hal_remote_l32 ( XPTR( thread_cxy , &process->pid        ) );

            nolock_printk("- thread[%x,%x]\n", pid, trdid );
        }
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end chdev_queue_display()
