source: trunk/kernel/kern/chdev.c @ 633

Last change on this file since 633 was 625, checked in by alain, 6 years ago

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated to a user DATA vseg were released to the kernel when
the target process descriptor was in the reference cluster.
These physical pages should be released only when the page
forks counter value is zero.
All other modifications are cosmetic.

File size: 21.7 KB
/*
 * chdev.c - channel device descriptor operations implementation.
 *
 * Authors  Alain Greiner   (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <printk.h>
#include <boot_info.h>
#include <xlist.h>
#include <kmem.h>
#include <scheduler.h>
#include <thread.h>
#include <rpc.h>
#include <chdev.h>
#include <devfs.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;         // allocated in kernel_init.c


#if (DEBUG_SYS_READ & 1)
extern uint32_t enter_chdev_cmd_read;
extern uint32_t exit_chdev_cmd_read;
extern uint32_t enter_chdev_server_read;
extern uint32_t exit_chdev_server_read;
#endif

#if (DEBUG_SYS_WRITE & 1)
extern uint32_t enter_chdev_cmd_write;
extern uint32_t exit_chdev_cmd_write;
extern uint32_t enter_chdev_server_write;
extern uint32_t exit_chdev_server_write;
#endif

////////////////////////////////////////////
char * chdev_func_str( uint32_t func_type )
{
    switch ( func_type )
    {
        case DEV_FUNC_RAM: return "RAM";
        case DEV_FUNC_ROM: return "ROM";
        case DEV_FUNC_FBF: return "FBF";
        case DEV_FUNC_IOB: return "IOB";
        case DEV_FUNC_IOC: return "IOC";
        case DEV_FUNC_MMC: return "MMC";
        case DEV_FUNC_DMA: return "DMA";
        case DEV_FUNC_NIC: return "NIC";
        case DEV_FUNC_TIM: return "TIM";
        case DEV_FUNC_TXT: return "TXT";
        case DEV_FUNC_ICU: return "ICU";
        case DEV_FUNC_PIC: return "PIC";
        default:           return "undefined";
    }
}

/////////////////////////////////////////
chdev_t * chdev_create( uint32_t    func,
                        uint32_t    impl,
                        uint32_t    channel,
                        uint32_t    is_rx,
                        xptr_t      base )
{
    chdev_t    * chdev;
    kmem_req_t   req;

    // allocate memory for chdev
    req.type   = KMEM_DEVICE;
    req.flags  = AF_ZERO;
    chdev      = (chdev_t *)kmem_alloc( &req );

    if( chdev == NULL ) return NULL;

    // initialize lock
    remote_busylock_init( XPTR( local_cxy , &chdev->wait_lock ), LOCK_CHDEV_QUEUE );

    // initialise waiting queue
    xlist_root_init( XPTR( local_cxy , &chdev->wait_root ) );

    // initialize attributes
    chdev->func    =  func;
    chdev->impl    =  impl;
    chdev->channel =  channel;
    chdev->is_rx   =  is_rx;
    chdev->base    =  base;

    return chdev;

}  // end chdev_create()

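///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the original sources: it shows how a boot-time
// initialisation routine could use chdev_create() and then fill the <cmd> and <isr>
// fields before the associated server thread is attached. The helper name and the
// txt_driver_cmd / txt_driver_isr driver functions are hypothetical placeholders;
// the block is guarded by #if 0 so it is never compiled.
///////////////////////////////////////////////////////////////////////////////////////
#if 0
chdev_t * example_txt_tx_init( uint32_t channel , xptr_t seg_base_xp )
{
    // allocate and initialize the chdev descriptor in the local cluster
    chdev_t * chdev = chdev_create( DEV_FUNC_TXT ,    // functional type
                                    0 ,               // implementation index
                                    channel ,         // channel index
                                    0 ,               // TX direction (is_rx == 0)
                                    seg_base_xp );    // device segment base

    if( chdev == NULL ) return NULL;

    // register the driver-specific command and ISR functions (hypothetical names)
    chdev->cmd = &txt_driver_cmd;
    chdev->isr = &txt_driver_isr;

    // register the new chdev in the global chdev directory
    chdev_dir.txt_tx[channel] = XPTR( local_cxy , chdev );

    return chdev;
}
#endif
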
///////////////////////////////////
void chdev_print( chdev_t * chdev )
{
    printk("\n - func      = %s"
           "\n - channel   = %d"
           "\n - base      = %l"
           "\n - cmd       = %x"
           "\n - isr       = %x"
           "\n - chdev     = %x\n",
           chdev_func_str(chdev->func),
           chdev->channel,
           chdev->base,
           chdev->cmd,
           chdev->isr,
           chdev );
}

//////////////////////////////////////////////////
void chdev_register_command( xptr_t     chdev_xp )
{
    thread_t * server_ptr;    // local pointer on server thread associated to the chdev
    xptr_t     server_xp;     // extended pointer on server thread
    core_t   * core_ptr;      // local pointer on core running the server thread
    uint32_t   server_lid;    // local index of the core running the server thread
    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev state

#if (DEBUG_SYS_READ & 1)
enter_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

    thread_t * this = CURRENT_THREAD;

    // get chdev cluster and local pointer
    cxy_t     chdev_cxy = GET_CXY( chdev_xp );
    chdev_t * chdev_ptr = GET_PTR( chdev_xp );

    // check calling thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // get local and extended pointers on server thread
    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
    server_xp  = XPTR( chdev_cxy , server_ptr );

    // get local pointer on core running the server thread
    core_ptr   = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );

    // get server core local index
    server_lid = hal_remote_l32( XPTR( chdev_cxy , &core_ptr->lid ) );

#if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX)
bool_t      is_rx        = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
trdid_t     server_trdid = hal_remote_l32( XPTR( chdev_cxy , &server_ptr->trdid ) );
process_t * process_ptr  = hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->process ) );
pid_t       server_pid   = hal_remote_l32( XPTR( chdev_cxy , &process_ptr->pid ) );
#endif

#if DEBUG_CHDEV_CMD_RX
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] enter for RX / server[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] enter for TX / server[%x,%x] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, tx_cycle );
#endif

    // build extended pointer on client thread xlist
    xptr_t  list_xp    = XPTR( local_cxy , &this->wait_list );

    // build extended pointer on chdev waiting queue root
    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // build extended pointer on lock protecting chdev waiting queue
    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

    // The following actions are executed as a critical section,
    // bracketed by the busylock acquire / release below:
    // (1) take the lock protecting the chdev waiting queue
    // (2) register the client thread in the server waiting queue
    // (3) unblock the server thread and block the client thread
    // (4) send an IPI to force the server thread scheduling
    // (5) release the lock protecting the waiting queue

    // 1. take the lock protecting chdev queue
    remote_busylock_acquire( lock_xp );

    // 2. register client thread in waiting queue
    xlist_add_last( root_xp , list_xp );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] registered write request in chdev\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx)  && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] registered read request in chdev\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // 3. client thread unblocks server thread and blocks itself
    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] unblocks server thread[%x,%x] and blocks itself\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] unblocks server thread[%x,%x] and blocks itself\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

    // 4. send IPI to core running the server thread when server core != client core
    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    {
        dev_pic_send_ipi( chdev_cxy , server_lid );

#if (DEBUG_CHDEV_CMD_TX & 1)
if( (is_rx == 0)  && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] sent IPI to TX server thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

#if (DEBUG_CHDEV_CMD_RX & 1)
if( (is_rx)  && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] sent IPI to RX server thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
#endif

    }

    // 5. release lock protecting chdev queue
    remote_busylock_release( lock_xp );

    // deschedule
    sched_yield("blocked on I/O");

#if DEBUG_CHDEV_CMD_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
printk("\n[%s] client thread[%x,%x] exit for RX / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_CMD_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
printk("\n[%s] client thread[%x,%x] exit for TX / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_cmd_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_cmd_write = (uint32_t)hal_get_cycles();
#endif

}  // end chdev_register_command()
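
///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the original sources: a typical client-side device
// access function selects the relevant chdev from the chdev_dir directory, stores the
// command arguments in device-specific fields of the client thread descriptor (omitted
// here), and then calls chdev_register_command(), which returns only after the server
// thread has executed the command. The function name is hypothetical, and the block
// is guarded by #if 0 so it is never compiled.
///////////////////////////////////////////////////////////////////////////////////////
#if 0
uint32_t example_txt_write( uint32_t channel )
{
    // get extended pointer on the TX chdev for this channel
    xptr_t chdev_xp = chdev_dir.txt_tx[channel];

    if( chdev_xp == XPTR_NULL ) return 1;

    // ... store command type / buffer / count in the client thread descriptor ...

    // register the command in the chdev waiting queue, unblock the server thread,
    // then block and deschedule until the server thread completes the command
    chdev_register_command( chdev_xp );

    // ... get the command completion status from the client thread descriptor ...

    return 0;
}
#endif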

/////////////////////////////////////////
void chdev_server_func( chdev_t * chdev )
{
    xptr_t          client_xp;    // extended pointer on waiting thread
    cxy_t           client_cxy;   // cluster of client thread
    thread_t      * client_ptr;   // local pointer on client thread
    thread_t      * server;       // local pointer on server thread
    xptr_t          root_xp;      // extended pointer on device waiting queue root
    xptr_t          lock_xp;      // extended pointer on lock protecting chdev queue

    server = CURRENT_THREAD;

    // build extended pointer on root of client threads queue
    root_xp = XPTR( local_cxy , &chdev->wait_root );

    // build extended pointer on lock protecting client threads queue
    lock_xp = XPTR( local_cxy , &chdev->wait_lock );

    // This infinite loop is executed by the DEV thread
    // to handle commands registered in the chdev queue.
    while( 1 )
    {

#if( DEBUG_CHDEV_SERVER_RX || DEBUG_CHDEV_SERVER_TX )
uint32_t rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] check TXT_RX channel %d / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, chdev->channel, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
uint32_t tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] thread[%x,%x] check TXT_TX channel %d / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, chdev->channel, tx_cycle );
#endif

        // check server thread can yield
        thread_assert_can_yield( server , __FUNCTION__ );

        // get the lock protecting the waiting queue
        remote_busylock_acquire( lock_xp );

        // check waiting queue state
        if( xlist_is_empty( root_xp ) ) // waiting queue empty
        {
            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] found RX queue empty => blocks / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] server thread[%x,%x] found TX queue empty => blocks / cycle %d\n",
__FUNCTION__ , server->process->pid, server->trdid, tx_cycle );
#endif
            // block
            thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE );

            // deschedule
            sched_yield("I/O queue empty");
        }
        else                            // waiting queue not empty
        {
            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

            // get extended pointer on first client thread
            client_xp = XLIST_FIRST( root_xp , thread_t , wait_list );

            // get client thread cluster and local pointer
            client_cxy = GET_CXY( client_xp );
            client_ptr = GET_PTR( client_xp );

#if( DEBUG_CHDEV_SERVER_TX || DEBUG_CHDEV_SERVER_RX )
process_t * process      = hal_remote_lpt( XPTR( client_cxy , &client_ptr->process ) );
pid_t       client_pid   = hal_remote_l32( XPTR( client_cxy , &process->pid ) );
trdid_t     client_trdid = hal_remote_l32( XPTR( client_cxy , &client_ptr->trdid ) );
#endif

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
enter_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
enter_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

            // call the (blocking) driver command function
            // to launch I/O operation AND wait completion
            chdev->cmd( client_xp );

            // unblock client thread when driver returns
            thread_unblock( client_xp , THREAD_BLOCKED_IO );

            // get the lock protecting the waiting queue
            remote_busylock_acquire( lock_xp );

            // remove this client thread from chdev waiting queue
            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );

            // release lock protecting the waiting queue
            remote_busylock_release( lock_xp );

#if DEBUG_CHDEV_SERVER_RX
rx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
#endif

#if DEBUG_CHDEV_SERVER_TX
tx_cycle = (uint32_t)hal_get_cycles();
if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
#endif

#if (DEBUG_SYS_READ & 1)
exit_chdev_server_read = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SYS_WRITE & 1)
exit_chdev_server_write = (uint32_t)hal_get_cycles();
#endif

        }
    }  // end while
}  // end chdev_server_func()

////////////////////////////////////////
xptr_t chdev_from_file( xptr_t file_xp )
{
    cxy_t         file_cxy;
    vfs_file_t  * file_ptr;
    uint32_t      inode_type;
    vfs_inode_t * inode_ptr;
    chdev_t     * chdev_ptr;

    assert( (file_xp != XPTR_NULL) ,
    "file_xp == XPTR_NULL\n" );

    // get cluster and local pointer on remote file descriptor
    // associated inode and chdev are stored in same cluster as the file desc.
    file_cxy  = GET_CXY( file_xp );
    file_ptr  = (vfs_file_t *)GET_PTR( file_xp );

    // get inode type from file descriptor
    inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    inode_ptr  = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );

    assert( (inode_type == INODE_TYPE_DEV) ,
    "inode type %d is not INODE_TYPE_DEV\n", inode_type );

    // get chdev local pointer from inode extension
    chdev_ptr = (chdev_t *)hal_remote_lpt( XPTR( file_cxy , &inode_ptr->extend ) );

    return XPTR( file_cxy , chdev_ptr );

}  // end chdev_from_file()

//////////////////////////////
void chdev_dir_display( void )
{
    uint32_t  i;
    cxy_t     cxy;
    chdev_t * ptr;
    uint32_t  base;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // header
    nolock_printk("\n***** external chdevs directory *****\n");

    // IOB
    if( chdev_dir.iob != XPTR_NULL )
    {
        cxy  = GET_CXY( chdev_dir.iob );
        ptr  = GET_PTR( chdev_dir.iob );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - iob       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);
    }

    // PIC
    cxy  = GET_CXY( chdev_dir.pic );
    ptr  = GET_PTR( chdev_dir.pic );
    base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
    nolock_printk("  - pic       : cxy = %X / ptr = %X / base = %X\n", cxy, ptr, base);

    // TXT
    for( i = 0 ; i < LOCAL_CLUSTER->nb_txt_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.txt_rx[i] );
        ptr = GET_PTR( chdev_dir.txt_rx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.txt_tx[i] );
        ptr = GET_PTR( chdev_dir.txt_tx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - txt_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // IOC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_ioc_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.ioc[i] );
        ptr = GET_PTR( chdev_dir.ioc[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - ioc[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // FBF
    for( i = 0 ; i < LOCAL_CLUSTER->nb_fbf_channels ; i++ )
    {
        cxy  = GET_CXY( chdev_dir.fbf[i] );
        ptr  = GET_PTR( chdev_dir.fbf[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - fbf[%d]    : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // NIC
    for( i = 0 ; i < LOCAL_CLUSTER->nb_nic_channels ; i++ )
    {
        cxy = GET_CXY( chdev_dir.nic_rx[i] );
        ptr = GET_PTR( chdev_dir.nic_rx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_rx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);

        cxy = GET_CXY( chdev_dir.nic_tx[i] );
        ptr = GET_PTR( chdev_dir.nic_tx[i] );
        base = (uint32_t)hal_remote_l64( XPTR( cxy , &ptr->base ) );
        nolock_printk("  - nic_tx[%d] : cxy = %X / ptr = %X / base = %X\n", i, cxy, ptr, base);
    }

    // release lock
    remote_busylock_release( lock_xp );

}  // end chdev_dir_display()

///////////////////////////////////////////
void chdev_queue_display( xptr_t chdev_xp )
{
    cxy_t       chdev_cxy;          // chdev cluster
    chdev_t   * chdev_ptr;          // chdev local pointer
    xptr_t      root_xp;            // extended pointer on waiting queue root
    char        name[16];           // local copy of chdev name
    xptr_t      iter_xp;            // extended pointer on xlist_t field in waiting thread
    xptr_t      thread_xp;          // extended pointer on thread registered in queue
    cxy_t       thread_cxy;         // cluster identifier for waiting thread
    thread_t  * thread_ptr;         // local pointer on waiting thread
    trdid_t     trdid;              // waiting thread identifier
    process_t * process;            // waiting thread process descriptor
    pid_t       pid;                // waiting thread process identifier

    // get cluster and local pointer on chdev
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = GET_PTR( chdev_xp );

    // get extended pointer on root of requests queue
    root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );

    // get chdev name
    hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( chdev_cxy , chdev_ptr->name ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // check queue empty
    if( xlist_is_empty( root_xp ) )
    {
        nolock_printk("\n***** Waiting queue empty for chdev %s\n", name );
    }
    else
    {
        nolock_printk("\n***** Waiting queue for chdev %s\n", name );

        // scan the waiting queue
        XLIST_FOREACH( root_xp , iter_xp )
        {
            thread_xp  = XLIST_ELEMENT( iter_xp , thread_t , wait_list );
            thread_cxy = GET_CXY( thread_xp );
            thread_ptr = GET_PTR( thread_xp );
            trdid      = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid   ) );
            process    = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
            pid        = hal_remote_l32( XPTR( thread_cxy , &process->pid        ) );

            nolock_printk("- thread[%x,%x]\n", pid, trdid );
        }
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end chdev_queue_display()

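///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the original sources: a hypothetical debug helper
// that dumps the waiting queues of all TXT chdevs registered in the chdev directory.
// Guarded by #if 0 so it is never compiled.
///////////////////////////////////////////////////////////////////////////////////////
#if 0
void example_display_txt_queues( void )
{
    uint32_t i;

    for( i = 0 ; i < LOCAL_CLUSTER->nb_txt_channels ; i++ )
    {
        chdev_queue_display( chdev_dir.txt_rx[i] );
        chdev_queue_display( chdev_dir.txt_tx[i] );
    }
}
#endif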