Changeset 564 for trunk/kernel
- Timestamp: Oct 4, 2018, 11:47:36 PM (6 years ago)
- Location: trunk/kernel/kern
- Files: 9 deleted, 19 edited
trunk/kernel/kern/chdev.c
(r545 → r564)

- A comment banner "Extern global variables" now frames the extern declaration of chdev_dir (allocated in kernel_init.c), and the switch statement of chdev_func_str() is reformatted.
- chdev_create(): the queue lock is now initialized with remote_busylock_init( XPTR( local_cxy , &chdev->wait_lock ) , LOCK_CHDEV_QUEUE ) instead of remote_spinlock_init(); the waiting queue root is still initialized with xlist_root_init().
- In the command registration function, the client thread now checks before blocking that it can yield: assert( (this->busylocks == 0) , "cannot yield : busylocks = %d\n" , this->busylocks ); the old assert( thread_can_yield() , "illegal sched_yield\n" ) placed just before sched_yield("blocked on I/O") is removed. The critical section (block the client, register it in the waiting queue, unblock the server if it is blocked on THREAD_BLOCKED_IDLE, send an IPI when the server core differs from the client core) is now protected by remote_busylock_acquire() / remote_busylock_release() on the lock protecting the chdev queue.
- In the server function, debug messages controlled by DEBUG_CHDEV_SERVER_RX / DEBUG_CHDEV_SERVER_TX are added at loop entry, when the queue is found empty, when a client thread is found (printing its trdid and its process pid), and when the command completes. Before descheduling on an empty queue, the server also checks assert( (server->busylocks == 0) , "cannot yield : busylocks = %d\n" , server->busylocks ). The first client is now taken with XLIST_FIRST() instead of XLIST_FIRST_ELEMENT().
- chdev_from_file(): the inode type is read with hal_remote_l32() instead of hal_remote_lw().
- chdev_dir_display(): the TXT0 lock is taken with remote_busylock_acquire() (the save_sr variable disappears), the IOB entry is tested against XPTR_NULL instead of NULL, and all base fields are read with hal_remote_l64() instead of hal_remote_lwd().
- chdev_queue_display(): the function now takes the TXT0 busylock and prints with nolock_printk() instead of printk() while scanning the waiting queue, releasing the lock at the end; trdid and pid are read with hal_remote_l32().
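The client-side protocol described above can be condensed into the following sketch. This is a simplified reconstruction, not the actual function: the THREAD_BLOCKED_IO flag, the xlist_add_last() registration and the hal_restore_irq() call are assumed from context, and the server-unblocking / IPI steps are elided.

    // Simplified sketch of the new client-side registration protocol.
    // Assumed names : THREAD_BLOCKED_IO , xlist_add_last() , hal_restore_irq().
    void chdev_register_client_sketch( xptr_t chdev_xp , thread_t * this )
    {
        cxy_t     chdev_cxy = GET_CXY( chdev_xp );
        chdev_t * chdev_ptr = GET_PTR( chdev_xp );
        uint32_t  save_sr;

        // a thread holding a busylock must not deschedule
        assert( (this->busylocks == 0) ,
        "cannot yield : busylocks = %d\n" , this->busylocks );

        // build extended pointer on the lock protecting the waiting queue
        xptr_t lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );

        // enter critical section : no descheduling between block and yield
        hal_disable_irq( &save_sr );

        // take the queue lock / block the client / register it in the queue
        remote_busylock_acquire( lock_xp );
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO );
        xlist_add_last( XPTR( chdev_cxy , &chdev_ptr->wait_root ) ,
                        XPTR( local_cxy , &this->wait_list ) );

        // ... unblock the server thread and send an IPI if required ...

        remote_busylock_release( lock_xp );

        // exit critical section and deschedule
        hal_restore_irq( save_sr );
        sched_yield( "blocked on I/O" );
    }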
trunk/kernel/kern/chdev.h
(r485 → r564)

- The include of <remote_spinlock.h> is replaced by <remote_busylock.h>.
- The header comment now states that each chdev contains a trans-cluster waiting queue, registering the "client threads", and an associated "server thread" handling these requests.
- A NOTE is added: for most chdevs, the busylock is used to protect the waiting queue changes, when a thread registers in this queue or is removed after service; this busylock is also used to protect direct access to the kernel TXT0 terminal (without using the server thread).
- In the chdev_t structure, the queue fields become:
      xlist_entry_t      wait_root;   /*! root of client threads waiting queue */
      remote_busylock_t  wait_lock;   /*! lock protecting waiting queue        */
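The direct TXT0 access mentioned in the NOTE follows the same pattern everywhere in this changeset; a minimal sketch, using only calls that appear in the diffs above:

    // Minimal sketch of the direct TXT0 access pattern (no server thread) :
    // the busylock serializes the character flow on the kernel terminal,
    // and nolock_printk() writes without taking any lock itself.
    void display_sketch( void )
    {
        // get pointers on the TXT0 chdev from the chdev directory
        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
        chdev_t * txt0_ptr = GET_PTR( txt0_xp );

        // build extended pointer on the TXT0 busylock
        xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        remote_busylock_acquire( lock_xp );
        nolock_printk("whatever must be printed atomically\n");
        remote_busylock_release( lock_xp );
    }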
trunk/kernel/kern/cluster.c
(r562 → r564)

- Includes: <spinlock.h> is replaced by <queuelock.h>; <hal_macros.h> is added; <cluster_info.h> is removed.
- cluster_init() is split in two functions:
  . cluster_info_init() copies the structural constants from the boot_info (x_size, y_size, the cluster_info[][] array, the external peripheral channels, and the number of cores);
  . cluster_manager_init() initializes the lock protecting the embedded KCM allocator with busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM ), the DQDT (dqdt_init() loses its y_width argument, and a debug message is added), the PPM / KHM / KCM allocators, the core descriptors, the RPC FIFOs (remote_fifo_init() replaces local_fifo_init()), and the process manager.
- The process manager locks get explicit types for debug: pref_lock becomes a queuelock (LOCK_CLUSTER_PREFTBL), and local_lock / copies_lock[lpid] become remote queuelocks (LOCK_CLUSTER_LOCALS and LOCK_CLUSTER_COPIES).
- cluster_random_select() is rewritten: it computes cxy = HAL_CXY_FROM_XY( x , y ) and retries while cluster_is_active( cxy ) is false; cluster_is_undefined() uses HAL_X_FROM_CXY() / HAL_Y_FROM_CXY() instead of explicit shifts on y_width; the new cluster_is_active( cxy ) function tests the local cluster_info[x][y] entry.
- cluster_pid_alloc() returns 0xFFFFFFFF instead of -1 when no slot is found.
- All critical sections on the local list and copies lists of processes use remote_queuelock_acquire() / remote_queuelock_release() instead of the busy-waiting remote_spinlock variants, so the saved SR variables disappear; cluster_processes_display() takes the TXT0 lock with remote_busylock_acquire() / remote_busylock_release().
- Remote reads use hal_remote_l32() / hal_remote_l64() instead of hal_remote_lw() / hal_remote_lwd(), and the debug messages now print the thread trdid and the process pid.
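The coordinate macros and cluster_is_active() combine into a traversal pattern used several times in this changeset; a sketch (only the body of the inner test is assumed):

    // Typical traversal of all active clusters with the new helpers
    // (same pattern as the chdev registration loops in kernel_init.c) :
    uint32_t x;
    uint32_t y;
    for( x = 0 ; x < info->x_size ; x++ )
    {
        for( y = 0 ; y < info->y_size ; y++ )
        {
            cxy_t cxy = HAL_CXY_FROM_XY( x , y );

            if( cluster_is_active( cxy ) )
            {
                // access the remote cluster, e.g. hal_remote_s64( ... )
            }
        }
    }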
trunk/kernel/kern/cluster.h
(r562 → r564)

- Includes: <spinlock.h>, <readlock.h> and <remote_barrier.h> are replaced by <queuelock.h> and <remote_queuelock.h>.
- The process manager comment explains that the local list and the copies lists are xlists, because they can be traversed by remote threads, and because process copies are distributed in all clusters.
- In pmgr_t, pref_lock becomes a queuelock_t protecting pref_tbl, and local_lock / copies_lock[] become remote_queuelock_t.
- In cluster_t, the cluster_info[CONFIG_MAX_CLUSTERS_X][CONFIG_MAX_CLUSTERS_Y] array becomes a char array grouped with the global parameters, and the kcm_lock becomes a busylock_t protecting kcm_tbl[] updates, grouped with the memory allocators.
- The initialisation API is documented as two functions: cluster_info_init(), called first to initialize the structural constants (it cannot use the TXT0 kernel terminal), and cluster_manager_init(), which initializes the local DQDT nodes, the PPM / KHM / KCM allocators, the local core descriptors, the local RPC FIFO, and the process manager (it can use the TXT0 terminal); neither initialises the local device descriptors.
- The new cluster_is_active( cxy ) function is declared: it uses the local cluster_info[][] array and returns true when the cluster contains a kernel instance.
- cluster_random_select() is documented as a (pseudo) random selection of a valid cluster, called by vfs_cluster_lookup() to place a new (missing) inode, and by vmm_page_allocate() to place a distributed vseg page.
- The comment of cluster_select_local_core() still states that it returns the local index of the core with the lowest usage in the local cluster.
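A minimal sketch of the expected call order after the split, assuming the call site in kernel_init() (the error handling shown is hypothetical):

    // Expected initialization order after the cluster_init() split (sketch).
    void kernel_init_sketch( boot_info_t * info )
    {
        // structural constants first : cannot use the TXT0 terminal
        cluster_info_init( info );

        // ... TXT0 terminal initialisation can take place here ...

        // DQDT / PPM / KHM / KCM / cores / RPC FIFO / process manager
        error_t error = cluster_manager_init( info );

        if( error )   // hypothetical error handling
        {
            printk("\n[PANIC] in %s : cannot initialize cluster manager\n",
            __FUNCTION__ );
        }
    }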
trunk/kernel/kern/core.c
(r457 → r564)

- Only the copyright header changes: "Alain Greiner (2016,2017)" becomes "Alain Greiner (2016,2017,2018)".
trunk/kernel/kern/core.h
(r457 → r564)

- Same change as core.c: the copyright line "Alain Greiner (2016,2017)" becomes "Alain Greiner (2016,2017,2018)".
trunk/kernel/kern/dqdt.c
(r562 → r564)

- The DQDT is temporarily disabled. The recursive helpers (the top-down display traversal, dqdt_propagate_threads(), dqdt_propagate_pages() and dqdt_select_cluster()) are commented out, and the active functions short-circuit them:
  . dqdt_display() returns immediately;
  . dqdt_update_threads() and dqdt_update_pages() return immediately (their increment argument is tagged __attribute__ ((__unused__)));
  . dqdt_get_cluster_for_process() and dqdt_get_cluster_for_memory() both return cluster_random_select().
- dqdt_init() loses its y_width argument and now only computes and returns level_max:
      uint32_t x_size_ext = POW2_ROUNDUP( x_size );
      uint32_t y_size_ext = POW2_ROUNDUP( y_size );
      uint32_t size_ext   = MAX( x_size_ext , y_size_ext );
      uint32_t level_max  = (bits_log2( size_ext * size_ext ) >> 1) + 1;
- In the commented-out code, the TXT0 lock is taken with remote_busylock_acquire(), cluster coordinates are computed with HAL_X_FROM_CXY() / HAL_Y_FROM_CXY(), parent and children cluster identifiers with HAL_CXY_FROM_XY(), and remote reads use hal_remote_l32() / hal_remote_l64() instead of hal_remote_lw() / hal_remote_lwd().
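A worked instance of the level_max formula, for a hypothetical 4 x 8 mesh:

    // Worked example of the level_max computation (4 x 8 mesh, by hand) :
    uint32_t x_size_ext = POW2_ROUNDUP( 4 );                            // =  4
    uint32_t y_size_ext = POW2_ROUNDUP( 8 );                            // =  8
    uint32_t size_ext   = MAX( x_size_ext , y_size_ext );               // =  8
    uint32_t level_max  = (bits_log2( size_ext * size_ext ) >> 1) + 1;  // =  4
    // cluster_manager_init() then sets dqdt_root_level = level_max - 1 = 3.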
trunk/kernel/kern/dqdt.h
(r485 → r564)

- Two TODOs are recorded: the mapping for the one-dimensionnal topology is not implemented yet, and the cluster_info[x][y] array is not taken into account [AG].
- dqdt_init() loses its y_width argument; it now takes only x_size (number of clusters, containing memory and CPUs, in a row) and y_size (number of clusters in a column), and returns the number of levels in the quad-tree.
- The header keeps the node-placement rule: level 0 nodes exist in all clusters, and level n nodes exist when both X and Y coordinates are multiple of 2^n (up to level 5 for coordinates multiple of 32).
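The node-existence rule documented in this header reduces to a one-line predicate; the helper below does not exist in the source and is only illustrative:

    // Illustrative predicate for the quad-tree node-existence rule
    // (hypothetical helper, not part of the source) :
    static inline bool_t dqdt_node_exists( uint32_t x , uint32_t y , uint32_t level )
    {
        uint32_t mask = (1 << level) - 1;
        return ( ((x & mask) == 0) && ((y & mask) == 0) );
    }
    // e.g. cluster (4,2) hosts nodes at levels 0 and 1 : level 1 requires
    // even coordinates (true), level 2 requires multiples of 4 (2 is not).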
trunk/kernel/kern/kernel_init.c
(r561 → r564)

- Includes: <hard_config.h> (used for the USE_TXT_XXX macros) and <cluster_info.h> are removed; <hal_macros.h> is added; <remote_barrier.h> becomes <xbarrier.h>, and the global_barrier variable used for CP0 cores synchronisation becomes an xbarrier_t. The comment on the kernel_init entry point is fixed ("It is used by the bootloader").
- The single txt0_chdev global variable and the associated txt0_lock spinlock are replaced by two global chdevs, txt0_tx_chdev and txt0_rx_chdev, defining the TXT_TX[0] and TXT_RX[0] channels of the kernel terminal. They are global variables (replicated in all clusters) because the terminal is used before the kmem allocator initialisation, but only the chdevs in cluster 0 are registered in the chdev_dir directory. The TXT_RX[0] chdev is created but not used by ALMOS-MKH (september 2018).
- A lock_type_str[] array of strings is added for debug; it describes the kernel locks usage and must be kept consistent with the defines in the kernel_config.h file (CLUSTER_KCM, PPM_FREE, SCHED_STATE, VMM_STACK, VMM_MMAP, VFS_CTX, KCM_STATE, KHM_STATE, HTAB_STATE, THREAD_JOIN, VFS_MAIN, CHDEV_QUEUE, CHDEV_TXT0, CHDEV_TXTLIST, PAGE_STATE, MUTEX_STATE, CONDVAR_STATE, SEM_STATE, XHTAB_STATE, CLUSTER_PREFTBL, PPM_DIRTY, CLUSTER_LOCALS, CLUSTER_COPIES, PROCESS_CHILDREN, PROCESS_USERSYNC, PROCESS_FDARRAY, MAPPER_STATE, PROCESS_THTBL, PROCESS_CWD, VFS_INODE, VFS_FILE, VMM_VSL).
- Two instrumentation arrays are added: syscalls_cumul_cost[SYSCALLS_NR] (cumulated cost per syscall type in the cluster) and syscalls_occurences[SYSCALLS_NR] (number of syscalls per syscall type).
- txt0_device_init() (now tagged __attribute__ ((noinline))) initializes both TXT_TX[0] and TXT_RX[0]: each wait_lock is initialized with remote_busylock_init( ... , LOCK_CHDEV_TXT0 ), dev_txt_init() is called on both chdevs, and both are registered with hal_remote_s64() in the chdev_dir[x][y] structures of all valid clusters (each cluster is tested with cluster_is_active( HAL_CXY_FROM_XY( x , y ) )), followed by hal_fence(). As this TXT0 chdev supports only the TXT_SYNC_WRITE command, no server thread is created, no WTI is allocated, and the waiting queue is not initialized.
- The TSAR-LETI specific mtty0_device_init() function is removed, as well as the internal MTY and IOC (SPI) channels handling in internal_devices_init(); the remaining assert() checks in internal_devices_init() (MMC single channel, MMC and DMA chdev allocation) become explicit printk("\n[PANIC] ...") messages.
- In the external devices initialisation, the checks on the PIC device and on the peripheral functional type also become printk PANIC messages; the TXT0 channel is now skipped entirely (the old test skipped only TXT_TX[0] when USE_TXT_MTY == 0), and all kernel instances compute the same target cluster for each external chdev from the global index ext_chdev_gid[func,channel,direction]:
      uint32_t offset = ext_chdev_gid % ( info->x_size * info->y_size );
      uint32_t x      = offset / info->y_size;
      uint32_t y      = offset
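The listing truncates inside this placement computation; by analogy with cluster_random_select() above, it presumably continues as sketched below (the modulo completion and the retry step are assumptions):

    // Hedged reconstruction of the per-chdev placement loop (sketch) :
    // the last visible line's completion and the retry step are assumed.
    cxy_t target_cxy;
    while( 1 )
    {
        uint32_t offset = ext_chdev_gid % ( info->x_size * info->y_size );
        uint32_t x      = offset / info->y_size;
        uint32_t y      = offset % info->y_size;      // assumed completion

        target_cxy = HAL_CXY_FROM_XY( x , y );

        // retry with the next global index if the target cluster is inactive
        if( cluster_is_active( target_cxy ) ) break;
        else                                   ext_chdev_gid++;
    }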
func,449 impl,450 channel,451 rx,452 base );453 454 assert( (chdev_ptr != NULL) ,455 "cannot allocate memory for MTY chdev" );456 457 // make MTY specific initialization458 dev_txt_init( chdev_ptr );459 460 // set the MTY fields in all clusters461 xptr_t *chdev_entry;462 if ( rx == 1 ) {463 chdev_entry = &chdev_dir.txt_rx[channel];464 } else {465 chdev_entry = &chdev_dir.txt_tx[channel];466 }467 for ( x = 0; x < info->x_size; x++ )468 {469 for ( y = 0; y < info->y_size; y++ )470 {471 if (cluster_info_is_active(info->cluster_info[x][y])) {472 cxy_t cxy = (x<<info->y_width) + y;473 hal_remote_swd( XPTR( cxy, chdev_entry ),474 XPTR( local_cxy, chdev_ptr ) );475 }476 }477 }478 #if( DEBUG_KERNEL_INIT & 0x1 )479 if( hal_time_stamp() > DEBUG_KERNEL_INIT )480 printk("\n[DBG] %s : created MTY[%d] in cluster %x / chdev = %x\n",481 __FUNCTION__ , channel , local_cxy , chdev_ptr );482 #endif483 }484 }485 }486 487 ///////////////////////////////488 else if ( func == DEV_FUNC_IOC )489 {490 assert(impl == IMPL_IOC_SPI, __FUNCTION__,491 "Internal IOC should have SPI implementation\n");492 493 for ( channel = 0; channel < channels; channel++ )494 {495 // create chdev in local cluster496 chdev_ptr = chdev_create( func,497 impl,498 channel,499 0,500 base );501 502 assert( (chdev_ptr != NULL) , __FUNCTION__ ,503 "cannot allocate memory for IOC chdev" );504 505 // make IOC specific initialization506 dev_ioc_init( chdev_ptr );507 508 // set the IOC fields in all clusters509 xptr_t *chdev_entry = &chdev_dir.ioc[channel];510 for ( x = 0; x < info->x_size; x++ )511 {512 for ( y = 0; y < info->y_size; y++ )513 {514 if (cluster_info_is_active(info->cluster_info[x][y])) {515 cxy_t cxy = (x<<info->y_width) + y;516 hal_remote_swd( XPTR( cxy, chdev_entry ),517 XPTR( local_cxy, chdev_ptr ) );518 }519 }520 }521 #if( DEBUG_KERNEL_INIT & 0x1 )522 if( hal_time_stamp() > DEBUG_KERNEL_INIT )523 printk("\n[DBG] %s : created IOC[%d] in cluster %x / chdev = %x\n",524 __FUNCTION__ , channel , local_cxy , chdev_ptr );525 #endif526 }527 }528 529 445 } 530 446 } // end internal_devices_init() … … 586 502 587 503 // check PIC device initialized 588 assert( (chdev_dir.pic != XPTR_NULL ) ,589 "PIC device must be initialized before other devices\n");504 if( chdev_dir.pic == XPTR_NULL ) 505 printk("\n[PANIC] in %s : PIC device must be initialized first\n", __FUNCTION__ ); 590 506 591 507 // check external device functionnal type 592 assert( ( (func == DEV_FUNC_IOB) || 593 (func == DEV_FUNC_IOC) || 594 (func == DEV_FUNC_TXT) || 595 (func == DEV_FUNC_NIC) || 596 (func == DEV_FUNC_FBF) ) , 597 "undefined external peripheral type\n" ); 508 if( (func != DEV_FUNC_IOB) && (func != DEV_FUNC_IOC) && (func != DEV_FUNC_TXT) && 509 (func != DEV_FUNC_NIC) && (func != DEV_FUNC_FBF) ) 510 printk("\n[PANIC] in %s : undefined peripheral type\n", __FUNCTION__ ); 598 511 599 512 // loops on channels … … 603 516 for( rx = 0 ; rx < directions ; rx++ ) 604 517 { 605 // skip TXT_TX[0] chdev that has already been created & registered 606 if( USE_TXT_MTY == 0 && (func == DEV_FUNC_TXT) && (channel == 0) && (rx == 0) ) 518 // skip TXT0 that has already been initialized 519 if( (func == DEV_FUNC_TXT) && (channel == 0) ) continue; 520 521 // all kernel instances compute the target cluster for all chdevs, 522 // computing the global index ext_chdev_gid[func,channel,direction] 523 cxy_t target_cxy; 524 while( 1 ) 607 525 { 608 continue; 526 uint32_t offset = ext_chdev_gid % ( info->x_size * info->y_size ); 527 uint32_t x = offset / info->y_size; 528 uint32_t y = offset 
% info->y_size; 529 530 target_cxy = HAL_CXY_FROM_XY( x , y ); 531 532 // exit loop if target cluster is active 533 if( cluster_is_active( target_cxy ) ) break; 534 535 // increment global index otherwise 536 ext_chdev_gid++; 609 537 } 610 538 611 // skip TXT chdevs because they are initialized in internal_devices_init()612 if ( USE_TXT_MTY == 1 && func == DEV_FUNC_TXT )613 {614 continue;615 }616 617 if ( func == DEV_FUNC_IOC && impl == IMPL_IOC_SPI )618 {619 continue;620 }621 622 // compute target cluster for chdev[func,channel,direction]623 uint32_t offset;624 uint32_t cx;625 uint32_t cy;626 uint32_t target_cxy;627 while (1) {628 offset = ext_chdev_gid % ( info->x_size * (info->y_size) );629 cx = offset / (info->y_size);630 cy = offset % (info->y_size);631 target_cxy = (cx<<info->y_width) + cy;632 // ext_chdev_gid that results in empty target clusters are skipped633 if ( cluster_info_is_active( LOCAL_CLUSTER->cluster_info[cx][cy] ) == 0 ) {634 ext_chdev_gid++;635 } else { // The ext_chdev_gid resulted in a full target cluster636 break;637 }638 }639 539 // allocate and initialize a local chdev 640 540 // when local cluster matches target cluster … … 647 547 base ); 648 548 649 assert( (chdev != NULL), 650 "cannot allocate external device" ); 549 if( chdev == NULL ) 550 printk("\n[PANIC] in %s : cannot allocate chdev for external device\n", 551 __FUNCTION__ ); 651 552 652 553 // make device type specific initialisation … … 672 573 for( x = 0 ; x < info->x_size ; x++ ) 673 574 { 674 for ( y = 0; y < info->y_size; y++ )575 for( y = 0 ; y < info->y_size ; y++ ) 675 576 { 676 if (cluster_info_is_active(info->cluster_info[x][y])) { 677 cxy_t cxy = (x<<info->y_width) + y; 678 hal_remote_swd( XPTR( cxy , entry ), 577 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 578 579 if( cluster_is_active( cxy ) ) 580 { 581 hal_remote_s64( XPTR( cxy , entry ), 679 582 XPTR( local_cxy , chdev ) ); 680 583 } … … 706 609 // @ info : pointer on the local boot-info structure. 
707 610 /////////////////////////////////////////////////////////////////////////////////////////// 708 static void iopic_init( boot_info_t * info )611 static void __attribute__ ((noinline)) iopic_init( boot_info_t * info ) 709 612 { 710 613 boot_device_t * dev_tbl; // pointer on boot_info external devices array … … 723 626 dev_tbl = info->ext_dev; 724 627 628 // avoid GCC warning 629 base = XPTR_NULL; 630 impl = 0; 631 725 632 // loop on external peripherals to get the IOPIC 726 633 for( i = 0 , found = false ; i < dev_nr ; i++ ) … … 737 644 } 738 645 739 assert( found , "PIC device not found\n" ); 646 // check PIC existence 647 if( found == false ) 648 printk("\n[PANIC] in %s : PIC device not found\n", __FUNCTION__ ); 740 649 741 650 // allocate and initialize the PIC chdev in cluster 0 … … 746 655 base ); 747 656 748 assert( (chdev != NULL), "no memory for PIC chdev\n" ); 657 // check memory 658 if( chdev == NULL ) 659 printk("\n[PANIC] in %s : no memory for PIC chdev\n", __FUNCTION__ ); 749 660 750 661 // make PIC device type specific initialisation … … 757 668 for( x = 0 ; x < info->x_size ; x++ ) 758 669 { 759 for ( y = 0; y < info->y_size; y++ )670 for( y = 0 ; y < info->y_size ; y++ ) 760 671 { 761 if (cluster_info_is_active(info->cluster_info[x][y])) { 762 cxy_t cxy = (x<<info->y_width) + y; 763 hal_remote_swd( XPTR( cxy , entry ) , 672 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 673 674 if( cluster_is_active( cxy ) ) 675 { 676 hal_remote_s64( XPTR( cxy , entry ) , 764 677 XPTR( local_cxy , chdev ) ); 765 678 } … … 773 686 for( x = 0 ; x < info->x_size ; x++ ) 774 687 { 775 for ( y = 0; y < info->y_size; y++ )688 for( y = 0 ; y < info->y_size ; y++ ) 776 689 { 777 if (cluster_info_is_active(info->cluster_info[x][y])) { 778 cxy_t cxy = (x<<info->y_width) + y; 779 hal_remote_memset( XPTR( cxy , &iopic_input ) , 0xFF , sizeof(iopic_input_t) ); 690 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 691 692 if( cluster_is_active( cxy ) ) 693 { 694 hal_remote_memset( XPTR( cxy , &iopic_input ), 695 0xFF , sizeof(iopic_input_t) ); 780 696 } 781 697 } … … 807 723 else if((func == DEV_FUNC_NIC) && (is_rx != 0)) ptr = &iopic_input.nic_rx[channel]; 808 724 else if( func == DEV_FUNC_IOB ) ptr = &iopic_input.iob; 809 else assert( false , "illegal source device for IOPIC input" );725 else printk("\n[PANIC] in %s : illegal source device for IOPIC input" ); 810 726 811 727 // set one entry in all "iopic_input" structures 812 728 for( x = 0 ; x < info->x_size ; x++ ) 813 729 { 814 for ( y = 0; y < info->y_size; y++ )730 for( y = 0 ; y < info->y_size ; y++ ) 815 731 { 816 if (cluster_info_is_active(info->cluster_info[x][y])) { 817 cxy_t cxy = (x<<info->y_width) + y; 818 hal_remote_swd( XPTR( cxy , ptr ) , id ); 732 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 733 734 if( cluster_is_active( cxy ) ) 735 { 736 hal_remote_s64( XPTR( cxy , ptr ) , id ); 819 737 } 820 738 } … … 824 742 825 743 #if( DEBUG_KERNEL_INIT & 0x1 ) 826 if( hal_tim e_stamp() > DEBUG_KERNEL_INIT )744 if( hal_tim_stamp() > DEBUG_KERNEL_INIT ) 827 745 { 828 746 printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n", … … 843 761 // @ info : pointer on the local boot-info structure. 
844 762 /////////////////////////////////////////////////////////////////////////////////////////// 845 static void lapic_init( boot_info_t * info )763 static void __attribute__ ((noinline)) lapic_init( boot_info_t * info ) 846 764 { 847 765 boot_device_t * dev_tbl; // pointer on boot_info internal devices array … … 896 814 if ( func == DEV_FUNC_MMC ) lapic_input.mmc = id; 897 815 else if( func == DEV_FUNC_DMA ) lapic_input.dma[channel] = id; 898 else if( func == DEV_FUNC_TXT ) lapic_input.mtty = id; 899 else if( func == DEV_FUNC_IOC ) lapic_input.sdcard = id; 900 else assert( false , "illegal source device for LAPIC input" ); 816 else printk("\n[PANIC] in %s : illegal source device for LAPIC input" ); 901 817 } 902 818 } … … 913 829 // @ return 0 if success / return EINVAL if not found. 914 830 /////////////////////////////////////////////////////////////////////////////////////////// 915 static error_t get_core_identifiers( boot_info_t * info,916 lid_t * lid,917 cxy_t * cxy,918 gid_t * gid )831 static error_t __attribute__ ((noinline)) get_core_identifiers( boot_info_t * info, 832 lid_t * lid, 833 cxy_t * cxy, 834 gid_t * gid ) 919 835 { 920 836 uint32_t i; … … 989 905 thread->core = &LOCAL_CLUSTER->core_tbl[core_lid]; 990 906 991 // each core initializes the idle thread lists of locks 992 list_root_init( &thread->locks_root ); 993 xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) ); 994 thread->local_locks = 0; 995 thread->remote_locks = 0; 996 997 // CP0 in cluster 0 initializes TXT0 chdev descriptor 998 if( core_cxy == 0 && core_lid == 0 ) // [MODIF] 999 { 1000 if( USE_TXT_MTY == 1 ) { 1001 mtty0_device_init( info ); 1002 } else { 1003 txt0_device_init( info ); 1004 } 1005 } 1006 1007 ///////////////////////////////////////////////////////////////////////////////// 1008 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME] 1009 cluster_info_nb_actives(info->cluster_info) ); 907 // each core initializes the idle thread locks counters 908 thread->busylocks = 0; 909 910 #if DEBUG_BUSYLOCK 911 // each core initialise the idle thread list of busylocks 912 xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) ); 913 #endif 914 915 // CP0 initializes cluster info 916 if( core_lid == 0 ) cluster_info_init( info ); 917 918 // CP0 in cluster 0 initialises TXT0 chdev descriptor 919 if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info ); 920 921 ///////////////////////////////////////////////////////////////////////////////// 922 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 923 (info->x_size * info->y_size) ); 1010 924 barrier_wait( &local_barrier , info->cores_nr ); 1011 925 ///////////////////////////////////////////////////////////////////////////////// 1012 926 1013 927 #if DEBUG_KERNEL_INIT 1014 if( (core_lid == 0) & (local_cxy == 0) )1015 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",1016 __FUNCTION__, (uint32_t)hal_get_ cycles() );928 // if( (core_lid == 0) & (local_cxy == 0) ) 929 printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / sr %x / cycle %d\n", 930 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1017 931 #endif 1018 932 … … 1025 939 // all cores check identifiers 1026 940 if( error ) 1027 { 1028 assert( false , 1029 "illegal core identifiers gid = %x / cxy = %x / lid = %d", 1030 core_lid , core_cxy , core_lid ); 1031 } 1032 1033 // CP0 initializes cluster manager 941 printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d", 942 __FUNCTION__, core_lid, 
core_cxy, core_lid ); 943 944 // CP0 initializes cluster manager complex structures 1034 945 if( core_lid == 0 ) 1035 946 { 1036 error = cluster_ init( info );947 error = cluster_manager_init( info ); 1037 948 1038 949 if( error ) 1039 { 1040 assert( false , 1041 "cannot initialise cluster %x", local_cxy ); 1042 } 950 printk("\n[PANIC] in %s : cannot initialize cluster manager in cluster %x\n", 951 __FUNCTION__, local_cxy ); 1043 952 } 1044 953 1045 954 ///////////////////////////////////////////////////////////////////////////////// 1046 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1047 cluster_info_nb_actives(info->cluster_info) );955 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 956 (info->x_size * info->y_size) ); 1048 957 barrier_wait( &local_barrier , info->cores_nr ); 1049 958 ///////////////////////////////////////////////////////////////////////////////// … … 1051 960 #if DEBUG_KERNEL_INIT 1052 961 if( (core_lid == 0) & (local_cxy == 0) ) 1053 printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",1054 __FUNCTION__, (uint32_t)hal_get_ cycles() );962 printk("\n[DBG] %s : exit barrier 1 : clusters initialised / sr %x / cycle %d\n", 963 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1055 964 #endif 1056 965 … … 1071 980 1072 981 //////////////////////////////////////////////////////////////////////////////// 1073 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1074 cluster_info_nb_actives(info->cluster_info) );982 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 983 (info->x_size * info->y_size) ); 1075 984 barrier_wait( &local_barrier , info->cores_nr ); 1076 985 //////////////////////////////////////////////////////////////////////////////// … … 1078 987 #if DEBUG_KERNEL_INIT 1079 988 if( (core_lid == 0) & (local_cxy == 0) ) 1080 printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",1081 __FUNCTION__, (uint32_t)hal_get_ cycles() );989 printk("\n[DBG] %s : exit barrier 2 : PIC initialised / sr %x / cycle %d\n", 990 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1082 991 #endif 1083 992 … … 1104 1013 1105 1014 ///////////////////////////////////////////////////////////////////////////////// 1106 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1107 cluster_info_nb_actives(info->cluster_info) );1015 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1016 (info->x_size * info->y_size) ); 1108 1017 barrier_wait( &local_barrier , info->cores_nr ); 1109 1018 ///////////////////////////////////////////////////////////////////////////////// … … 1111 1020 #if DEBUG_KERNEL_INIT 1112 1021 if( (core_lid == 0) & (local_cxy == 0) ) 1113 printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",1114 __FUNCTION__, (uint32_t)hal_get_ cycles() );1022 printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / sr %x / cycle %d\n", 1023 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1115 1024 #endif 1116 1025 … … 1127 1036 ///////////////////////////////////////////////////////////////////////////////// 1128 1037 1129 // All cores enable the shared IPI channel1038 // All cores enable IPI 1130 1039 dev_pic_enable_ipi(); 1131 1040 hal_enable_irq( &status ); 1132 1133 #if DEBUG_KERNEL_INIT1134 printk("\n[DBG] %s: IPI enabled for core %d cluster %d\n", __FUNCTION__,1135 core_lid, local_cxy);1136 #endif1137 1041 1138 1042 // all cores initialize the idle thread 
descriptor … … 1163 1067 fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc(); 1164 1068 1165 assert( (fatfs_ctx != NULL) , 1166 "cannot create FATFS context in cluster 0\n" ); 1069 if( fatfs_ctx == NULL ) 1070 printk("\n[PANIC] in %s : cannot create FATFS context in cluster 0\n", 1071 __FUNCTION__ ); 1167 1072 1168 1073 // 2. access boot device to initialize FATFS context … … 1175 1080 uint32_t total_clusters = fatfs_ctx->fat_sectors_count << 7; 1176 1081 1177 // 4. create VFS root inode in cluster 0 1082 // 4. initialize the FATFS entry in the vfs_context[] array 1083 vfs_ctx_init( FS_TYPE_FATFS, // fs type 1084 0, // attributes: unused 1085 total_clusters, 1086 cluster_size, 1087 vfs_root_inode_xp, // VFS root 1088 fatfs_ctx ); // extend 1089 1090 // 5. create VFS root inode in cluster 0 1178 1091 error = vfs_inode_create( XPTR_NULL, // dentry_xp 1179 1092 FS_TYPE_FATFS, // fs_type … … 1185 1098 0, // gid 1186 1099 &vfs_root_inode_xp ); // return 1187 1188 assert( (error == 0) , 1189 "cannot create VFS root inode\n" ); 1190 1191 // 5. initialize VFS context for FAT in cluster 0 1192 vfs_ctx_init( FS_TYPE_FATFS, // file system type 1193 0, // attributes 1194 total_clusters, 1195 cluster_size, 1196 vfs_root_inode_xp, // VFS root 1197 fatfs_ctx ); // extend 1198 1199 // 6. check initialisation 1100 if( error ) 1101 printk("\n[PANIC] in %s : cannot create VFS root inode in cluster 0\n", 1102 __FUNCTION__ ); 1103 1104 // 6. update the FATFS entry in vfs_context[] array 1105 fs_context[FS_TYPE_FATFS].vfs_root_xp = vfs_root_inode_xp; 1106 1107 // 7. check FATFS initialization 1200 1108 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS]; 1201 assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8), 1202 "illegal value for FATFS context in cluster %x\n", local_cxy ); 1109 1110 if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 ) 1111 printk("\n[PANIC] in %s : illegal FATFS context in cluster 0\n", 1112 __FUNCTION__ ); 1203 1113 } 1204 1114 else 1205 1115 { 1206 assert( false,1207 "root FS must be FATFS");1116 printk("\n[PANIC] in %s : unsupported VFS type in cluster 0\n", 1117 __FUNCTION__ ); 1208 1118 } 1209 1119 … … 1214 1124 1215 1125 ///////////////////////////////////////////////////////////////////////////////// 1216 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1217 cluster_info_nb_actives(info->cluster_info) );1126 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1127 (info->x_size * info->y_size) ); 1218 1128 barrier_wait( &local_barrier , info->cores_nr ); 1219 1129 ///////////////////////////////////////////////////////////////////////////////// … … 1221 1131 #if DEBUG_KERNEL_INIT 1222 1132 if( (core_lid == 0) & (local_cxy == 0) ) 1223 printk("\n[DBG] %s : exit barrier 4 : VFS _root = %l in cluster 0/ cycle %d\n",1224 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());1133 printk("\n[DBG] %s : exit barrier 4 : VFS root initialized in cluster 0 / sr %x / cycle %d\n", 1134 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1225 1135 #endif 1226 1136 … … 1241 1151 fatfs_ctx_t * local_fatfs_ctx = fatfs_ctx_alloc(); 1242 1152 1243 assert( (local_fatfs_ctx != NULL) , 1244 "cannot create FATFS context in cluster %x\n", local_cxy ); 1153 // check memory 1154 if( local_fatfs_ctx == NULL ) 1155 printk("\n[PANIC] in %s : cannot create FATFS context in cluster %x\n", 1156 __FUNCTION__ , local_cxy ); 1245 1157 1246 1158 // 2. 
get local pointer on VFS context for FATFS … … 1261 1173 vfs_ctx->extend = local_fatfs_ctx; 1262 1174 1263 // 7. check initialisation1264 assert( (((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster == 8),1265 "illegal value for FATFS context in cluster %x\n", local_cxy );1175 if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 ) 1176 printk("\n[PANIC] in %s : illegal FATFS context in cluster %x\n", 1177 __FUNCTION__ , local_cxy ); 1266 1178 } 1267 1179 1268 1180 // get extended pointer on VFS root inode from cluster 0 1269 vfs_root_inode_xp = hal_remote_l wd( XPTR( 0 , &process_zero.vfs_root_xp ) );1181 vfs_root_inode_xp = hal_remote_l64( XPTR( 0 , &process_zero.vfs_root_xp ) ); 1270 1182 1271 1183 // update local process_zero descriptor … … 1275 1187 1276 1188 ///////////////////////////////////////////////////////////////////////////////// 1277 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1278 cluster_info_nb_actives(info->cluster_info) );1189 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1190 (info->x_size * info->y_size) ); 1279 1191 barrier_wait( &local_barrier , info->cores_nr ); 1280 1192 ///////////////////////////////////////////////////////////////////////////////// 1281 1193 1282 1194 #if DEBUG_KERNEL_INIT 1283 if( (core_lid == 0) & (local_cxy == 0) )1284 printk("\n[DBG] %s : exit barrier 5 : VFS _root = %l in cluster 0/ cycle %d\n",1285 __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());1286 #endif 1287 1288 ///////////////////////////////////////////////////////////////////////////////// 1289 // STEP 6 : CP0 in cluster IOmakes the global DEVFS tree initialisation:1290 // It creates the DEVFS directory "dev", and the DEVFS "external"1291 // directory in cluster IO and mount these inodes into VFS.1292 ///////////////////////////////////////////////////////////////////////////////// 1293 1294 if( (core_lid == 0) && (local_cxy == 0) ) // [FIXME]1195 if( (core_lid == 0) & (local_cxy == 1) ) 1196 printk("\n[DBG] %s : exit barrier 5 : VFS root initialized in cluster 1 / sr %x / cycle %d\n", 1197 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1198 #endif 1199 1200 ///////////////////////////////////////////////////////////////////////////////// 1201 // STEP 6 : CP0 in cluster 0 makes the global DEVFS tree initialisation: 1202 // It initializes the DEVFS context, and creates the DEVFS 1203 // "dev" and "external" inodes in cluster 0. 1204 ///////////////////////////////////////////////////////////////////////////////// 1205 1206 if( (core_lid == 0) && (local_cxy == 0) ) 1295 1207 { 1296 // create "dev" and "external" directories. 1208 // 1. allocate memory for DEVFS context extension in cluster 0 1209 devfs_ctx_t * devfs_ctx = devfs_ctx_alloc(); 1210 1211 if( devfs_ctx == NULL ) 1212 printk("\n[PANIC] in %s : cannot create DEVFS context in cluster 0\n", 1213 __FUNCTION__ , local_cxy ); 1214 1215 // 2. initialize the DEVFS entry in the vfs_context[] array 1216 vfs_ctx_init( FS_TYPE_DEVFS, // fs type 1217 0, // attributes: unused 1218 0, // total_clusters: unused 1219 0, // cluster_size: unused 1220 vfs_root_inode_xp, // VFS root 1221 devfs_ctx ); // extend 1222 1223 // 3. 
create "dev" and "external" inodes (directories) 1297 1224 devfs_global_init( process_zero.vfs_root_xp, 1298 1225 &devfs_dev_inode_xp, 1299 1226 &devfs_external_inode_xp ); 1300 1227 1301 // creates the DEVFS context in cluster IO 1302 devfs_ctx_t * devfs_ctx = devfs_ctx_alloc(); 1303 1304 assert( (devfs_ctx != NULL) , 1305 "cannot create DEVFS context in cluster IO\n"); 1306 1307 // register DEVFS root and external directories 1308 devfs_ctx_init( devfs_ctx, devfs_dev_inode_xp, devfs_external_inode_xp ); 1228 // 4. initializes DEVFS context extension 1229 devfs_ctx_init( devfs_ctx, 1230 devfs_dev_inode_xp, 1231 devfs_external_inode_xp ); 1309 1232 } 1310 1233 1311 1234 ///////////////////////////////////////////////////////////////////////////////// 1312 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1313 cluster_info_nb_actives(info->cluster_info) );1235 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1236 (info->x_size * info->y_size) ); 1314 1237 barrier_wait( &local_barrier , info->cores_nr ); 1315 1238 ///////////////////////////////////////////////////////////////////////////////// … … 1317 1240 #if DEBUG_KERNEL_INIT 1318 1241 if( (core_lid == 0) & (local_cxy == 0) ) 1319 printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster 0/ cycle %d\n",1320 __FUNCTION__, devfs_dev_inode_xp, (uint32_t)hal_get_cycles() );1242 printk("\n[DBG] %s : exit barrier 6 : DEVFS root initialized in cluster 0 / sr %x / cycle %d\n", 1243 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1321 1244 #endif 1322 1245 … … 1324 1247 // STEP 7 : All CP0s complete in parallel the DEVFS tree initialization. 1325 1248 // Each CP0 get the "dev" and "external" extended pointers from 1326 // values stored in cluster IO.1327 // Then each CP0 in cluster(i) creates the DEVFS "internal directory,1249 // values stored in cluster 0. 1250 // Then each CP0 in cluster(i) creates the DEVFS "internal" directory, 1328 1251 // and creates the pseudo-files for all chdevs in cluster (i). 
1329 1252 ///////////////////////////////////////////////////////////////////////////////// … … 1331 1254 if( core_lid == 0 ) 1332 1255 { 1333 // get extended pointer on "extend" field of VFS context for DEVFS in cluster IO1334 xptr_t extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend ); // [FIXME]1256 // get extended pointer on "extend" field of VFS context for DEVFS in cluster 0 1257 xptr_t extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend ); 1335 1258 1336 1259 // get pointer on DEVFS context in cluster 0 1337 1260 devfs_ctx_t * devfs_ctx = hal_remote_lpt( extend_xp ); 1338 1261 1339 devfs_dev_inode_xp = hal_remote_l wd( XPTR( 0 , &devfs_ctx->dev_inode_xp ) );1340 devfs_external_inode_xp = hal_remote_l wd( XPTR( 0 , &devfs_ctx->external_inode_xp ) );1262 devfs_dev_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->dev_inode_xp ) ); 1263 devfs_external_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->external_inode_xp ) ); 1341 1264 1342 1265 // populate DEVFS in all clusters … … 1347 1270 1348 1271 ///////////////////////////////////////////////////////////////////////////////// 1349 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1350 cluster_info_nb_actives(info->cluster_info) );1272 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1273 (info->x_size * info->y_size) ); 1351 1274 barrier_wait( &local_barrier , info->cores_nr ); 1352 1275 ///////////////////////////////////////////////////////////////////////////////// … … 1354 1277 #if DEBUG_KERNEL_INIT 1355 1278 if( (core_lid == 0) & (local_cxy == 0) ) 1356 printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0/ cycle %d\n",1357 __FUNCTION__, devfs_dev_inode_xp, (uint32_t)hal_get_cycles() );1279 printk("\n[DBG] %s : exit barrier 7 : DEV initialized in cluster 0 / sr %x / cycle %d\n", 1280 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1358 1281 #endif 1359 1282 … … 1373 1296 1374 1297 ///////////////////////////////////////////////////////////////////////////////// 1375 if( core_lid == 0 ) remote_barrier( XPTR( 0 , &global_barrier ), // [FIXME]1376 cluster_info_nb_actives(info->cluster_info) );1298 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1299 (info->x_size * info->y_size) ); 1377 1300 barrier_wait( &local_barrier , info->cores_nr ); 1378 1301 ///////////////////////////////////////////////////////////////////////////////// … … 1380 1303 #if DEBUG_KERNEL_INIT 1381 1304 if( (core_lid == 0) & (local_cxy == 0) ) 1382 printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",1383 __FUNCTION__ 1305 printk("\n[DBG] %s : exit barrier 8 : process init created / sr %x / cycle %d\n", 1306 __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() ); 1384 1307 #endif 1385 1308 1386 1309 #if (DEBUG_KERNEL_INIT & 1) 1387 if( (core_lid == 0) /*& (local_cxy == 0)*/)1310 if( (core_lid == 0) & (local_cxy == 0) ) 1388 1311 sched_display( 0 ); 1389 1312 #endif … … 1393 1316 ///////////////////////////////////////////////////////////////////////////////// 1394 1317 1395 if( (core_lid == 0) && (local_cxy == 0) ) // [FIXME]1318 if( (core_lid == 0) && (local_cxy == 0) ) 1396 1319 { 1397 1320 print_banner( (info->x_size * info->y_size) , info->cores_nr ); … … 1415 1338 " - list item : %d bytes\n" 1416 1339 " - xlist item : %d bytes\n" 1417 " - spinlock : %d bytes\n" 1418 " - remote spinlock : %d bytes\n" 1340 " - busylock : %d bytes\n" 1341 " - remote busylock : %d bytes\n" 1342 " - queuelock : %d bytes\n" 1343 " - remote queuelock : 
%d bytes\n" 1419 1344 " - rwlock : %d bytes\n" 1420 1345 " - remote rwlock : %d bytes\n", 1421 sizeof( thread_t ), 1422 sizeof( process_t ), 1423 sizeof( cluster_t ), 1424 sizeof( chdev_t ), 1425 sizeof( core_t ), 1426 sizeof( scheduler_t ), 1427 sizeof( remote_fifo_t ), 1428 sizeof( page_t ), 1429 sizeof( mapper_t ), 1430 sizeof( ppm_t ), 1431 sizeof( kcm_t ), 1432 sizeof( khm_t ), 1433 sizeof( vmm_t ), 1434 sizeof( gpt_t ), 1435 sizeof( list_entry_t ), 1436 sizeof( xlist_entry_t ), 1437 sizeof( spinlock_t ), 1438 sizeof( remote_spinlock_t ), 1439 sizeof( rwlock_t ), 1440 sizeof( remote_rwlock_t )); 1346 sizeof( thread_t ), 1347 sizeof( process_t ), 1348 sizeof( cluster_t ), 1349 sizeof( chdev_t ), 1350 sizeof( core_t ), 1351 sizeof( scheduler_t ), 1352 sizeof( remote_fifo_t ), 1353 sizeof( page_t ), 1354 sizeof( mapper_t ), 1355 sizeof( ppm_t ), 1356 sizeof( kcm_t ), 1357 sizeof( khm_t ), 1358 sizeof( vmm_t ), 1359 sizeof( gpt_t ), 1360 sizeof( list_entry_t ), 1361 sizeof( xlist_entry_t ), 1362 sizeof( busylock_t ), 1363 sizeof( remote_busylock_t ), 1364 sizeof( queuelock_t ), 1365 sizeof( remote_queuelock_t ), 1366 sizeof( rwlock_t ), 1367 sizeof( remote_rwlock_t )); 1441 1368 #endif 1442 1369 -
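The same registration idiom recurs throughout kernel_init.c for TXT0, MMC, DMA, the external chdevs and the PIC: scan all (x,y) coordinates, skip inactive clusters, and store the extended pointer on the locally created chdev into the remote copy of the chdev directory. A minimal sketch of this idiom, assuming the HAL_CXY_FROM_XY(), cluster_is_active() and hal_remote_s64() primitives behave as in the code above (the helper name register_chdev_in_all_clusters is hypothetical):

    // Hypothetical helper illustrating the registration idiom used above:
    // store the XPTR of a locally created chdev into one slot of the
    // chdev_dir structure replicated in every active cluster.
    static void register_chdev_in_all_clusters( boot_info_t * info,
                                                xptr_t      * entry,   // slot, e.g. &chdev_dir.txt_tx[0]
                                                chdev_t     * chdev )  // chdev created in local cluster
    {
        uint32_t x;
        uint32_t y;

        for( x = 0 ; x < info->x_size ; x++ )
        {
            for( y = 0 ; y < info->y_size ; y++ )
            {
                // build cluster identifier from coordinates
                cxy_t cxy = HAL_CXY_FROM_XY( x , y );

                // skip clusters that are not active
                if( cluster_is_active( cxy ) == false ) continue;

                // remote write of extended pointer into target cluster copy of chdev_dir
                hal_remote_s64( XPTR( cxy , entry ),
                                XPTR( local_cxy , chdev ) );
            }
        }

        // make remote writes visible before other cores use the directory
        hal_fence();
    }

Because chdev_dir is a replicated global variable, the local pointer entry designates the same slot in every cluster, so the XPTR( cxy , entry ) address computation is valid for all target clusters.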
trunk/kernel/kern/printk.c
r502 r564 26 26 #include <hal_special.h> 27 27 #include <dev_txt.h> 28 #include <remote_ spinlock.h>28 #include <remote_busylock.h> 29 29 #include <cluster.h> 30 30 #include <thread.h> … … 201 201 // @ args : va_list of arguments. 202 202 ////////////////////////////////////////////////////////////////////////////////////// 203 static void kernel_printf( c har* format,204 va_list * args )203 static void kernel_printf( const char * format, 204 va_list * args ) 205 205 { 206 206 … … 352 352 { 353 353 va_list args; 354 reg_t save_sr;355 354 356 355 // get pointers on TXT0 chdev … … 359 358 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 360 359 361 // get extended pointer on remote TXT0 chdevlock360 // get extended pointer on remote TXT0 lock 362 361 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 363 362 364 // get TXT0 lock in busy waiting mode365 remote_ spinlock_lock_busy( lock_xp , &save_sr);366 367 // call kernel_printf on TXT0,in busy waiting mode363 // get TXT0 lock 364 remote_busylock_acquire( lock_xp ); 365 366 // display format on TXT0 in busy waiting mode 368 367 va_start( args , format ); 369 368 kernel_printf( format , &args ); 370 369 va_end( args ); 371 370 372 // release lock373 remote_ spinlock_unlock_busy( lock_xp , save_sr);371 // release TXT0 lock 372 remote_busylock_release( lock_xp ); 374 373 } 375 374 … … 386 385 387 386 //////////////////////////////////// 388 void __panic( const char * file_name,389 390 391 392 393 387 void panic( const char * file_name, 388 const char * function_name, 389 uint32_t line, 390 cycle_t cycle, 391 const char * format, 392 ... ) 394 393 { 395 394 // get pointers on TXT0 chdev … … 399 398 400 399 // get extended pointer on remote TXT0 lock 401 xptr_t lock_ txt0_xp = XPTR(txt0_cxy, &txt0_ptr->wait_lock);402 403 // get TXT0 lock in busy waiting mode404 {405 uint32_t save_sr; 406 remote_spinlock_lock_busy(lock_txt0_xp, &save_sr);407 408 thread_t *current = CURRENT_THREAD; 409 nolock_printk(410 "\n[PANIC] in %s: line %d | funct %s | cycle %d\n"411 " core[%x,%d] | thread %x in process %x\n"412 " | thread_ptr %x | procress_ptr %x\n",400 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 401 402 // get TXT0 lock 403 remote_busylock_acquire( lock_xp ); 404 405 // get calling thread 406 thread_t * current = CURRENT_THREAD; 407 408 // print generic infos 409 nolock_printk( 410 "\n[PANIC] in %s: line %d | function %s | cycle %d\n" 411 "core[%x,%d] | thread %x (%x) in process %x (%x)\n", 413 412 file_name, line, function_name, (uint32_t) cycle, 414 local_cxy, current->core->lid, current->trdid, current->process->pid,415 current , current->process);416 417 // call kernel_printf on TXT0, in busy waiting to print format 418 va_list args;419 va_start(args, format);420 kernel_printf(format, &args);421 va_end(args);422 423 // release TXT0 lock 424 remote_spinlock_unlock_busy(lock_txt0_xp, save_sr);425 }413 local_cxy, current->core->lid, 414 current->trdid, current, 415 current->process->pid, current->process ); 416 417 // call kernel_printf to print format 418 va_list args; 419 va_start(args, format); 420 kernel_printf(format, &args); 421 va_end(args); 422 423 // release TXT0 lock 424 remote_busylock_release( lock_xp ); 426 425 427 426 // suicide … … 432 431 void puts( char * string ) 433 432 { 434 uint32_t save_sr;435 433 uint32_t n = 0; 436 434 … … 443 441 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 444 442 445 // get extended pointer on remote TXT0 chdevlock443 // get extended pointer on remote TXT0 lock 446 444 xptr_t lock_xp = XPTR( txt0_cxy , 
&txt0_ptr->wait_lock ); 447 445 448 // get TXT0 lock in busy waiting mode449 remote_ spinlock_lock_busy( lock_xp , &save_sr);446 // get TXT0 lock 447 remote_busylock_acquire( lock_xp ); 450 448 451 449 // display string on TTY0 452 450 dev_txt_sync_write( string , n ); 453 451 454 // release TXT0 lock in busy waiting mode455 remote_ spinlock_unlock_busy( lock_xp , save_sr);452 // release TXT0 lock 453 remote_busylock_release( lock_xp ); 456 454 } 457 455 … … 464 462 char buf[10]; 465 463 uint32_t c; 466 uint32_t save_sr;467 464 468 465 buf[0] = '0'; … … 484 481 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 485 482 486 // get TXT0 lock in busy waiting mode487 remote_ spinlock_lock_busy( lock_xp , &save_sr);483 // get TXT0 lock 484 remote_busylock_acquire( lock_xp ); 488 485 489 486 // display string on TTY0 490 487 dev_txt_sync_write( buf , 10 ); 491 488 492 // release TXT0 lock in busy waiting mode493 remote_ spinlock_unlock_busy( lock_xp , save_sr);489 // release TXT0 lock 490 remote_busylock_release( lock_xp ); 494 491 } 495 492 … … 501 498 char buf[18]; 502 499 uint32_t c; 503 uint32_t save_sr;504 500 505 501 buf[0] = '0'; … … 521 517 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 522 518 523 // get TXT0 lock in busy waiting mode524 remote_ spinlock_lock_busy( lock_xp , &save_sr);519 // get TXT0 lock 520 remote_busylock_acquire( lock_xp ); 525 521 526 522 // display string on TTY0 527 523 dev_txt_sync_write( buf , 18 ); 528 524 529 // release TXT0 lock in busy waiting mode530 remote_ spinlock_unlock_busy( lock_xp , save_sr);525 // release TXT0 lock 526 remote_busylock_release( lock_xp ); 531 527 } 532 528 -
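After this change, all console primitives in printk.c (printk(), puts(), putx(), putl(), panic()) share the same access sequence: locate the TXT0 chdev through the chdev directory, acquire its remote busylock, perform a busy-waiting write through the synchronous TXT driver, and release the lock. A condensed sketch of that sequence, assuming the chdev_dir and dev_txt_sync_write() interfaces shown above (the function name txt0_sync_puts is hypothetical):

    // Hypothetical condensed form of the puts() pattern above.
    static void txt0_sync_puts( char * string , uint32_t length )
    {
        // get cluster and local pointer on TXT0 chdev
        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
        chdev_t * txt0_ptr = GET_PTR( txt0_xp );

        // build extended pointer on remote TXT0 busylock
        xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

        // get TXT0 lock for exclusive access to the shared terminal
        remote_busylock_acquire( lock_xp );

        // display string in busy waiting mode
        dev_txt_sync_write( string , length );

        // release TXT0 lock
        remote_busylock_release( lock_xp );
    }

Note that the save_sr argument of the former remote_spinlock_lock_busy() call disappears: the interrupt state is presumably saved and restored by the remote_busylock_acquire() / remote_busylock_release() primitives themselves.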
trunk/kernel/kern/printk.h
r502 r564 28 28 // - The printk() function displays kernel messages on the kernel terminal TXT0, 29 29 // using a busy waiting policy: It calls directly the relevant TXT driver, 30 // after taking the TXT0 chdevlock for exclusive access to the TXT0 terminal.30 // after taking the TXT0 busylock for exclusive access to the TXT0 terminal. 31 31 // - The user_printk() function displays messages on the calling thread private 32 32 // terminal, using a descheduling policy: it register the request in the selected … … 67 67 /********************************************************************************** 68 68 * This function displays a formatted string on the kernel terminal TXT0, 69 * using a busy waiting policy: It calls directly the relevant TXT driver,70 69 * after taking the TXT0 lock. 70 * It uses a busy waiting policy, calling directly the relevant TXT driver, 71 71 ********************************************************************************** 72 72 * @ format : formatted string. … … 76 76 /********************************************************************************** 77 77 * This function displays a formatted string on the kernel terminal TXT0, 78 * using a busy waiting policy: It calls directly the relevant TXT driver,79 78 * without taking the TXT0 lock. 79 * It uses a busy waiting policy, calling directly the relevant TXT driver, 80 80 ********************************************************************************** 81 81 * @ format : formatted string. … … 85 85 86 86 /********************************************************************************** 87 * Private function designed to be called by the assert macro (below) 87 * This function is called in case of kernel panic. It printt a detailed message 88 * on the TXT0 terminal after taking the TXT0 lock, and call the hal_core_sleep() 89 * function to block the calling core. It is used by the assert macro (below). 88 90 ********************************************************************************** 89 91 * @ file_name : File where the assert macro was invoked … … 96 98 * See assert macro documentation for information about printed information. 97 99 *********************************************************************************/ 98 void __panic( const char * file_name, 99 const char * function_name, 100 uint32_t line, 101 cycle_t cycle, 102 const char * format, 103 ... ) 104 __attribute__((__noreturn__)); 100 void panic( const char * file_name, 101 const char * function_name, 102 uint32_t line, 103 cycle_t cycle, 104 const char * format, 105 ... ) __attribute__((__noreturn__)); 105 106 106 107 /********************************************************************************** … … 134 135 * @ format : formatted string 135 136 *********************************************************************************/ 136 #define assert( expr, format, ... ) { uint32_t __line_at_expansion = __LINE__; \ 137 const volatile cycle_t __assert_cycle = hal_get_cycles(); \ 138 if ( ( expr ) == false ) { \ 139 __panic( __FILE__, __FUNCTION__, \ 140 __line_at_expansion, __assert_cycle, \ 141 ( format ), ##__VA_ARGS__ ); \ 142 } \ 137 #define assert( expr, format, ... 
) \ 138 { \ 139 uint32_t __line_at_expansion = __LINE__; \ 140 const volatile cycle_t __assert_cycle = hal_get_cycles(); \ 141 if ( ( expr ) == false ) \ 142 { \ 143 panic( __FILE__, __FUNCTION__, \ 144 __line_at_expansion, __assert_cycle, \ 145 ( format ), ##__VA_ARGS__ ); \ 146 } \ 143 147 } 144 148 … … 168 172 169 173 170 171 /* deprecated march 2018 [AG]172 173 #if CONFIG_CHDEV_DEBUG174 #define chdev_dmsg(...) if(hal_time_stamp() > CONFIG_CHDEV_DEBUG) printk(__VA_ARGS__)175 #else176 #define chdev_dmsg(...)177 #endif178 179 #if CONFIG_CLUSTER_DEBUG180 #define cluster_dmsg(...) if(hal_time_stamp() > CONFIG_CLUSTER_DEBUG) printk(__VA_ARGS__)181 #else182 #define cluster_dmsg(...)183 #endif184 185 #if CONFIG_CONTEXT_DEBUG186 #define context_dmsg(...) if(hal_time_stamp() > CONFIG_CONTEXT_DEBUG) printk(__VA_ARGS__)187 #else188 #define context_dmsg(...)189 #endif190 191 #if CONFIG_CORE_DEBUG192 #define core_dmsg(...) if(hal_time_stamp() > CONFIG_CORE_DEBUG) printk(__VA_ARGS__)193 #else194 #define core_dmsg(...)195 #endif196 197 #if CONFIG_DEVFS_DEBUG198 #define devfs_dmsg(...) if(hal_time_stamp() > CONFIG_DEVFS_DEBUG) printk(__VA_ARGS__)199 #else200 #define devfs_dmsg(...)201 #endif202 203 #if CONFIG_DMA_DEBUG204 #define dma_dmsg(...) if(hal_time_stamp() > CONFIG_DMA_DEBUG) printk(__VA_ARGS__)205 #else206 #define dma_dmsg(...)207 #endif208 209 #if CONFIG_DQDT_DEBUG210 #define dqdt_dmsg(...) if(hal_time_stamp() > CONFIG_DQDT_DEBUG) printk(__VA_ARGS__)211 #else212 #define dqdt_dmsg(...)213 #endif214 215 #if CONFIG_ELF_DEBUG216 #define elf_dmsg(...) if(hal_time_stamp() > CONFIG_ELF_DEBUG) printk(__VA_ARGS__)217 #else218 #define elf_dmsg(...)219 #endif220 221 #if CONFIG_EXEC_DEBUG222 #define exec_dmsg(...) if(hal_time_stamp() > CONFIG_EXEC_DEBUG) printk(__VA_ARGS__)223 #else224 #define exec_dmsg(...)225 #endif226 227 #if CONFIG_EXCP_DEBUG228 #define excp_dmsg(...) if(hal_time_stamp() > CONFIG_EXCP_DEBUG) printk(__VA_ARGS__)229 #else230 #define excp_dmsg(...)231 #endif232 233 #if CONFIG_FATFS_DEBUG234 #define fatfs_dmsg(...) if(hal_time_stamp() > CONFIG_FATFS_DEBUG) printk(__VA_ARGS__)235 #else236 #define fatfs_dmsg(...)237 #endif238 239 #if CONFIG_FBF_DEBUG240 #define fbf_dmsg(...) if(hal_time_stamp() > CONFIG_FBF_DEBUG) printk(__VA_ARGS__)241 #else242 #define fbf_dmsg(...)243 #endif244 245 #if CONFIG_FORK_DEBUG246 #define fork_dmsg(...) if(hal_time_stamp() > CONFIG_FORK_DEBUG) printk(__VA_ARGS__)247 #else248 #define fork_dmsg(...)249 #endif250 251 #if CONFIG_GPT_DEBUG252 #define gpt_dmsg(...) if(hal_time_stamp() > CONFIG_GPT_DEBUG) printk(__VA_ARGS__)253 #else254 #define gpt_dmsg(...)255 #endif256 257 #if CONFIG_GRPC_DEBUG258 #define grpc_dmsg(...) if(hal_time_stamp() > CONFIG_GRPC_DEBUG) printk(__VA_ARGS__)259 #else260 #define grpc_dmsg(...)261 #endif262 263 #if CONFIG_IDLE_DEBUG264 #define idle_dmsg(...) if(hal_time_stamp() > CONFIG_IDLE_DEBUG) printk(__VA_ARGS__)265 #else266 #define idle_dmsg(...)267 #endif268 269 #if CONFIG_IOC_DEBUG270 #define ioc_dmsg(...) if(hal_time_stamp() > CONFIG_IOC_DEBUG) printk(__VA_ARGS__)271 #else272 #define ioc_dmsg(...)273 #endif274 275 #if CONFIG_IRQ_DEBUG276 #define irq_dmsg(...) if(hal_time_stamp() > CONFIG_IRQ_DEBUG) printk(__VA_ARGS__)277 #else278 #define irq_dmsg(...)279 #endif280 281 #if CONFIG_KCM_DEBUG282 #define kcm_dmsg(...) if(hal_time_stamp() > CONFIG_KCM_DEBUG) printk(__VA_ARGS__)283 #else284 #define kcm_dmsg(...)285 #endif286 287 #if CONFIG_KHM_DEBUG288 #define khm_dmsg(...) 
if(hal_time_stamp() > CONFIG_KHM_DEBUG) printk(__VA_ARGS__)289 #else290 #define khm_dmsg(...)291 #endif292 293 #if CONFIG_KILL_DEBUG294 #define kill_dmsg(...) if(hal_time_stamp() > CONFIG_KILL_DEBUG) printk(__VA_ARGS__)295 #else296 #define kill_dmsg(...)297 #endif298 299 #if CONFIG_KINIT_DEBUG300 #define kinit_dmsg(...) if(hal_time_stamp() > CONFIG_KINIT_DEBUG) printk(__VA_ARGS__)301 #else302 #define kinit_dmsg(...)303 #endif304 305 #if CONFIG_KMEM_DEBUG306 #define kmem_dmsg(...) if(hal_time_stamp() > CONFIG_KMEM_DEBUG) printk(__VA_ARGS__)307 #else308 #define kmem_dmsg(...)309 #endif310 311 #if CONFIG_MAPPER_DEBUG312 #define mapper_dmsg(...) if(hal_time_stamp() > CONFIG_MAPPER_DEBUG) printk(__VA_ARGS__)313 #else314 #define mapper_dmsg(...)315 #endif316 317 #if CONFIG_MMAP_DEBUG318 #define mmap_dmsg(...) if(hal_time_stamp() > CONFIG_MMAP_DEBUG) printk(__VA_ARGS__)319 #else320 #define mmap_dmsg(...)321 #endif322 323 #if CONFIG_MMC_DEBUG324 #define mmc_dmsg(...) if(hal_time_stamp() > CONFIG_MMC_DEBUG) printk(__VA_ARGS__)325 #else326 #define mmc_dmsg(...)327 #endif328 329 #if CONFIG_NIC_DEBUG330 #define nic_dmsg(...) if(hal_time_stamp() > CONFIG_NIC_DEBUG) printk(__VA_ARGS__)331 #else332 #define nic_dmsg(...)333 #endif334 335 #if CONFIG_PIC_DEBUG336 #define pic_dmsg(...) if(hal_time_stamp() > CONFIG_PIC_DEBUG) printk(__VA_ARGS__)337 #else338 #define pic_dmsg(...)339 #endif340 341 #if CONFIG_PPM_DEBUG342 #define ppm_dmsg(...) if(hal_time_stamp() > CONFIG_PPM_DEBUG) printk(__VA_ARGS__)343 #else344 #define ppm_dmsg(...)345 #endif346 347 #if CONFIG_PROCESS_DEBUG348 #define process_dmsg(...) if(hal_time_stamp() > CONFIG_PROCESS_DEBUG) printk(__VA_ARGS__)349 #else350 #define process_dmsg(...)351 #endif352 353 #if CONFIG_READ_DEBUG354 #define read_dmsg(...) if(hal_time_stamp() > CONFIG_READ_DEBUG) printk(__VA_ARGS__)355 #else356 #define read_dmsg(...)357 #endif358 359 #if CONFIG_RPC_DEBUG360 #define rpc_dmsg(...) if(hal_time_stamp() > CONFIG_RPC_DEBUG) printk(__VA_ARGS__)361 #else362 #define rpc_dmsg(...)363 #endif364 365 #if CONFIG_SCHED_DEBUG366 #define sched_dmsg(...) if(hal_time_stamp() > CONFIG_SCHED_DEBUG) printk(__VA_ARGS__)367 #else368 #define sched_dmsg(...)369 #endif370 371 #if CONFIG_SIGACTION_DEBUG372 #define sigaction_dmsg(...) if(hal_time_stamp() > CONFIG_SIGACTION_DEBUG) printk(__VA_ARGS__)373 #else374 #define sigaction_dmsg(...)375 #endif376 377 #if CONFIG_SYSCALL_DEBUG378 #define syscall_dmsg(...) if(hal_time_stamp() > CONFIG_SYSCALL_DEBUG) printk(__VA_ARGS__)379 #else380 #define syscall_dmsg(...)381 #endif382 383 #if CONFIG_THREAD_DEBUG384 #define thread_dmsg(...) if(hal_time_stamp() > CONFIG_THREAD_DEBUG) printk(__VA_ARGS__)385 #else386 #define thread_dmsg(...)387 #endif388 389 #if CONFIG_TXT_DEBUG390 #define txt_dmsg(...) if(hal_time_stamp() > CONFIG_TXT_DEBUG) printk(__VA_ARGS__)391 #else392 #define txt_dmsg(...)393 #endif394 395 #if CONFIG_VFS_DEBUG396 #define vfs_dmsg(...) if(hal_time_stamp() > CONFIG_VFS_DEBUG) printk(__VA_ARGS__)397 #else398 #define vfs_dmsg(...)399 #endif400 401 #if CONFIG_VMM_DEBUG402 #define vmm_dmsg(...) if(hal_time_stamp() > CONFIG_VMM_DEBUG) printk(__VA_ARGS__)403 #else404 #define vmm_dmsg(...)405 #endif406 407 #if CONFIG_WRITE_DEBUG408 #define write_dmsg(...) if(hal_time_stamp() > CONFIG_WRITE_DEBUG) printk(__VA_ARGS__)409 #else410 #define write_dmsg(...)411 #endif412 413 */414 415 174 #endif // _PRINTK_H 416 175 -
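With this new definition, assert() evaluates the condition once, records the file, function, line and cycle at the call site, and delegates to panic(), which takes the TXT0 busylock, prints the diagnostic, and blocks the calling core. A usage sketch at a hypothetical call site:

    // The condition is evaluated once; on failure panic() prints the
    // recorded context plus the formatted message and never returns.
    void chdev_check( chdev_t * chdev )
    {
        assert( (chdev != NULL) ,
                "cannot allocate chdev in cluster %x" , local_cxy );

        // execution reaches this point only when the assertion holds
    }

Since panic() is declared __attribute__((__noreturn__)), control never returns to the call site when the assertion fails.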
trunk/kernel/kern/process.c
r527 r564 1 1 /* 2 * process.c - process related management2 * process.c - process related functions definition. 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) … … 43 43 #include <string.h> 44 44 #include <scheduler.h> 45 #include <remote_spinlock.h> 45 #include <busylock.h> 46 #include <queuelock.h> 47 #include <remote_queuelock.h> 48 #include <rwlock.h> 49 #include <remote_rwlock.h> 46 50 #include <dqdt.h> 47 51 #include <cluster.h> … … 114 118 115 119 // get parent_pid 116 parent_pid = hal_remote_l w( XPTR( parent_cxy , &parent_ptr->pid ) );120 parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) ); 117 121 118 122 #if DEBUG_PROCESS_REFERENCE_INIT … … 132 136 // initialize vmm as empty 133 137 error = vmm_init( process ); 134 assert( (error == 0) , "cannot initialize VMM\n" ); 138 139 assert( (error == 0) , "cannot initialize VMM\n" ); 135 140 136 141 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 138 143 if( DEBUG_PROCESS_REFERENCE_INIT ) 139 144 printk("\n[DBG] %s : thread %x in process %x / vmm empty for process %x / cycle %d\n", 140 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid , cycle );145 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid , pid, cycle ); 141 146 #endif 142 147 … … 160 165 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, txt_id, cycle ); 161 166 #endif 162 163 164 165 167 // build path to TXT_RX[i] and TXT_TX[i] chdevs 166 168 snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id ); … … 175 177 &stdin_id ); 176 178 177 178 179 assert( (error == 0) , "cannot open stdin pseudo file" ); 180 assert( (stdin_id == 0) , "stdin index must be 0" ); 179 181 180 182 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 225 227 { 226 228 // get extended pointer on stdin pseudo file in parent process 227 file_xp = (xptr_t)hal_remote_l wd( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );229 file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) ); 228 230 229 231 // get extended pointer on parent process TXT chdev … … 234 236 chdev_ptr = GET_PTR( chdev_xp ); 235 237 236 // get TXT terminal index237 txt_id = hal_remote_l w( XPTR( chdev_cxy , &chdev_ptr->channel ) );238 239 // attach process to TXT[txt_id]238 // get parent process TXT terminal index 239 txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) ); 240 241 // attach child process to parent process TXT terminal 240 242 process_txt_attach( process , txt_id ); 241 243 … … 246 248 247 249 // initialize specific inodes root and cwd 248 process->vfs_root_xp = (xptr_t)hal_remote_l wd( XPTR( parent_cxy,250 process->vfs_root_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy, 249 251 &parent_ptr->vfs_root_xp ) ); 250 process->vfs_cwd_xp = (xptr_t)hal_remote_l wd( XPTR( parent_cxy,252 process->vfs_cwd_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy, 251 253 &parent_ptr->vfs_cwd_xp ) ); 252 254 vfs_inode_remote_up( process->vfs_root_xp ); 253 255 vfs_inode_remote_up( process->vfs_cwd_xp ); 254 256 255 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );257 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD ); 256 258 257 259 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 258 260 cycle = (uint32_t)hal_get_cycles(); 259 261 if( DEBUG_PROCESS_REFERENCE_INIT ) 260 printk("\n[DBG] %s : thread %x /fd_array for process %x / cycle %d\n",261 __FUNCTION__ , CURRENT_THREAD, pid , cycle );262 printk("\n[DBG] %s : thread %x in process %x / set fd_array for process %x / cycle %d\n", 263 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid , cycle ); 262 
264 #endif 263 265 … … 265 267 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 266 268 process->children_nr = 0; 267 remote_ spinlock_init( XPTR( local_cxy , &process->children_lock ));269 remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN ); 268 270 269 271 // reset semaphore / mutex / barrier / condvar list roots … … 272 274 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 273 275 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 274 remote_ spinlock_init( XPTR( local_cxy , &process->sync_lock ));276 remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC ); 275 277 276 278 // register new process in the local cluster manager pref_tbl[] … … 284 286 cluster_process_copies_link( process ); 285 287 286 // reset th_tbl[] array as empty in process descriptor288 // initialize th_tbl[] array and associated threads 287 289 uint32_t i; 288 for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ ) 290 291 for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) 289 292 { 290 293 process->th_tbl[i] = NULL; 291 294 } 292 295 process->th_nr = 0; 293 spinlock_init( &process->th_lock);296 rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL ); 294 297 295 298 hal_fence(); … … 298 301 cycle = (uint32_t)hal_get_cycles(); 299 302 if( DEBUG_PROCESS_REFERENCE_INIT ) 300 printk("\n[DBG] %s : thread %x exit /process %x / cycle %d\n",301 __FUNCTION__ , CURRENT_THREAD , pid, cycle );303 printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n", 304 __FUNCTION__, CURRENT_THREAD->trdid, parent_pid, pid, cycle ); 302 305 #endif 303 306 … … 315 318 316 319 // initialize PID, REF_XP, PARENT_XP, and STATE 317 local_process->pid = hal_remote_l w( XPTR( ref_cxy , &ref_ptr->pid ) );318 local_process->parent_xp = hal_remote_l wd( XPTR( ref_cxy , &ref_ptr->parent_xp ) );320 local_process->pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); 321 local_process->parent_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); 319 322 local_process->ref_xp = reference_process_xp; 320 323 local_process->owner_xp = reference_process_xp; 321 324 local_process->term_state = 0; 322 325 323 #if DEBUG_PROCESS_COPY_INIT 326 #if DEBUG_PROCESS_COPY_INIT 327 thread_t * this = CURRET_THREAD; 324 328 uint32_t cycle = (uint32_t)hal_get_cycles(); 325 329 if( DEBUG_PROCESS_COPY_INIT ) 326 printk("\n[DBG] %s : thread %x enter for process %x\n", 327 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); 328 #endif 330 printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n", 331 __FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle ); 332 #endif 333 334 // check user process 335 assert( (local_process->pid != 0), "PID cannot be 0" ); 329 336 330 337 // reset local process vmm … … 336 343 337 344 // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields 338 local_process->vfs_root_xp = hal_remote_l wd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );339 local_process->vfs_bin_xp = hal_remote_l wd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );345 local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) ); 346 local_process->vfs_bin_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) ); 340 347 local_process->vfs_cwd_xp = XPTR_NULL; 341 348 … … 343 350 xlist_root_init( XPTR( local_cxy , &local_process->children_root ) ); 344 351 local_process->children_nr = 0; 345 remote_spinlock_init( XPTR( local_cxy , &local_process->children_lock ) ); 352 
remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ), 353 LOCK_PROCESS_CHILDREN ); 346 354 347 355 // reset children_list (not used in a process descriptor copy) … … 354 362 xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) ); 355 363 356 // reset th_tbl[] array as empty364 // initialize th_tbl[] array and associated fields 357 365 uint32_t i; 358 for( i = 0 ; i < CONFIG_THREAD _MAX_PER_CLUSTER ; i++ )366 for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) 359 367 { 360 368 local_process->th_tbl[i] = NULL; 361 369 } 362 370 local_process->th_nr = 0; 363 spinlock_init( &local_process->th_lock ); 371 rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL ); 372 364 373 365 374 // register new process descriptor in local cluster manager local_list … … 374 383 cycle = (uint32_t)hal_get_cycles(); 375 384 if( DEBUG_PROCESS_COPY_INIT ) 376 printk("\n[DBG] %s : thread %x exit for process %x\n",377 __FUNCTION__ , CURRENT_THREAD , local_process->pid);385 printk("\n[DBG] %s : thread %x in process %x exit for process %x / cycle %d\n", 386 __FUNCTION__, this->trdid, this->process->pid, local_process->pid, cycle ); 378 387 #endif 379 388 … … 399 408 uint32_t cycle = (uint32_t)hal_get_cycles(); 400 409 if( DEBUG_PROCESS_DESTROY ) 401 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",402 __FUNCTION__ , CURRENT_THREAD , pid , local_cxy, cycle );410 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n", 411 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle ); 403 412 #endif 404 413 … … 423 432 424 433 // remove process from children_list 425 remote_ spinlock_lock( children_lock_xp );434 remote_queuelock_acquire( children_lock_xp ); 426 435 xlist_unlink( XPTR( local_cxy , &process->children_list ) ); 427 436 hal_remote_atomic_add( children_nr_xp , -1 ); 428 remote_spinlock_unlock( children_lock_xp ); 429 430 // release the process PID to cluster manager 431 cluster_pid_release( pid ); 432 433 } 434 435 // FIXME close all open files and update dirty [AG] 437 remote_queuelock_release( children_lock_xp ); 438 439 // release the process PID to cluster manager 440 cluster_pid_release( pid ); 441 } 442 443 // FIXME close all open files and synchronize dirty [AG] 436 444 437 445 // decrease refcount for bin file, root file and cwd file … … 449 457 cycle = (uint32_t)hal_get_cycles(); 450 458 if( DEBUG_PROCESS_DESTROY ) 451 printk("\n[DBG] %s : thread %x exit / destroyedprocess %x in cluster %x / cycle %d\n",452 __FUNCTION__ , CURRENT_THREAD, pid, local_cxy, cycle );459 printk("\n[DBG] %s : thread %x in process %x exit / process %x in cluster %x / cycle %d\n", 460 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, pid, local_cxy, cycle ); 453 461 #endif 454 462 … … 491 499 remote_nr = 0; 492 500 501 // check calling thread can yield 502 assert( (client->busylocks == 0), 503 "cannot yield : busylocks = %d\n", client->busylocks ); 504 493 505 #if DEBUG_PROCESS_SIGACTION 494 506 uint32_t cycle = (uint32_t)hal_get_cycles(); … … 517 529 518 530 // The client thread send parallel RPCs to all remote clusters containing 519 // target process copies, wait all responses, and then handles directly the520 // th reads in local cluster, when required.531 // target process copies, wait all responses, and then handles directly 532 // the threads in local cluster, when required. 
521 533 // The client thread allocates a - shared - RPC descriptor in the stack, 522 534 // because all parallel, non-blocking, server threads use the same input … … 529 541 thread_block( client_xp , THREAD_BLOCKED_RPC ); 530 542 531 // take the lock protecting thecopies532 remote_ spinlock_lock( lock_xp );543 // take the lock protecting process copies 544 remote_queuelock_acquire( lock_xp ); 533 545 534 546 // initialize shared RPC descriptor … … 573 585 574 586 // release the lock protecting process copies 575 remote_ spinlock_unlock( lock_xp );587 remote_queuelock_release( lock_xp ); 576 588 577 589 // restore IRQs … … 620 632 thread_t * target; // pointer on target thread 621 633 thread_t * this; // pointer on calling thread 622 uint32_t ltid; // index in process th_tbl 634 uint32_t ltid; // index in process th_tbl[] 623 635 cxy_t owner_cxy; // target process owner cluster 624 636 uint32_t count; // requests counter … … 628 640 this = CURRENT_THREAD; 629 641 642 #if DEBUG_PROCESS_SIGACTION 643 pid_t pid = process->pid; 644 uint32_t cycle = (uint32_t)hal_get_cycles(); 645 if( DEBUG_PROCESS_SIGACTION < cycle ) 646 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n", 647 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle ); 648 #endif 649 650 // check target process is an user process 651 assert( ( process->pid != 0 ), 652 "target process must be an user process" ); 653 630 654 // get target process owner cluster 631 655 owner_cxy = CXY_FROM_PID( process->pid ); 632 656 633 #if DEBUG_PROCESS_SIGACTION634 uint32_t cycle = (uint32_t)hal_get_cycles();635 if( DEBUG_PROCESS_SIGACTION < cycle )636 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",637 __FUNCTION__ , this , process->pid , local_cxy , cycle );638 #endif639 640 657 // get lock protecting process th_tbl[] 641 spinlock_lock( &process->th_lock );658 rwlock_rd_acquire( &process->th_lock ); 642 659 643 660 // loop on target process local threads … … 680 697 681 698 // release lock protecting process th_tbl[] 682 spinlock_unlock( &process->th_lock ); 683 684 // wait acknowledges 699 rwlock_rd_release( &process->th_lock ); 700 701 // busy waiting acknowledges 702 // TODO this could be improved... 
685 703 while( 1 ) { … … 695 713 cycle = (uint32_t)hal_get_cycles(); 696 714 if( DEBUG_PROCESS_SIGACTION < cycle ) 697 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",698 __FUNCTION__ , this , process->pid, local_cxy , cycle );715 printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n", 716 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle ); 699 717 #endif 700 718 … … 719 737 720 738 #if DEBUG_PROCESS_SIGACTION 739 pid_t pid = process->pid; 721 740 uint32_t cycle = (uint32_t)hal_get_cycles(); 722 741 if( DEBUG_PROCESS_SIGACTION < cycle ) 723 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 724 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 725 #endif 742 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n", 743 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle ); 744 #endif 745 746 // check target process is a user process 747 assert( ( process->pid != 0 ), 748 "target process must be a user process" ); 726 749 727 750 // get lock protecting process th_tbl[] 728 spinlock_lock( &process->th_lock );751 rwlock_rd_acquire( &process->th_lock ); 729 752 730 753 // loop on target process local threads … … 739 762 target_xp = XPTR( local_cxy , target ); 740 763 741 // main thread and client thread should not be blocked764 // main thread and client thread should not be deleted 742 765 if( ((ltid != 0) || (owner_cxy != local_cxy)) && // not main thread 743 766 (client_xp) != target_xp ) // not client thread 744 767 { … … 750 773 751 774 // release lock protecting process th_tbl[] 752 spinlock_unlock( &process->th_lock );775 rwlock_rd_release( &process->th_lock ); 753 776 754 777 #if DEBUG_PROCESS_SIGACTION 755 778 cycle = (uint32_t)hal_get_cycles(); 756 779 if( DEBUG_PROCESS_SIGACTION < cycle ) 757 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",758 __FUNCTION__ , this , process->pid, local_cxy , cycle );780 printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n", 781 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle ); 759 782 #endif 760 783 … … 773 796 774 797 #if DEBUG_PROCESS_SIGACTION 798 pid_t pid = process->pid; 775 799 uint32_t cycle = (uint32_t)hal_get_cycles(); 776 800 if( DEBUG_PROCESS_SIGACTION < cycle ) 777 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 778 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 779 #endif 801 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n", 802 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle ); 803 #endif 804 805 // check target process is a user process 806 assert( ( process->pid != 0 ), 807 "target process must be a user process" ); 780 808 781 809 // get lock protecting process th_tbl[] 782 spinlock_lock( &process->th_lock );810 rwlock_rd_acquire( &process->th_lock ); 783 811 784 812 // loop on process threads to unblock all threads … … 798 826 799 827 // release lock protecting process th_tbl[] 800 spinlock_unlock( &process->th_lock );828 rwlock_rd_release( &process->th_lock ); 801 829 802 830 #if DEBUG_PROCESS_SIGACTION 803 831 cycle = (uint32_t)hal_get_cycles(); 804 832 if( DEBUG_PROCESS_SIGACTION < cycle ) 805 printk("\n[DBG] %s :
thread %x in process %x exit for process %x in cluster %x / cycle %d\n", 834 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle ); 807 835 #endif 808 836 … … 818 846 cluster_t * cluster = LOCAL_CLUSTER; 819 847 848 #if DEBUG_PROCESS_GET_LOCAL_COPY 849 thread_t * this = CURRENT_THREAD; 850 uint32_t cycle = (uint32_t)hal_get_cycles(); 851 if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) 852 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n", 853 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle ); 854 #endif 855 820 856 // get lock protecting local list of processes 821 remote_ spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );857 remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) ); 822 858 823 859 // scan the local list of process descriptors to find the process … … 836 872 837 873 // release lock protecting local list of processes 838 remote_ spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );874 remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) ); 839 875 840 876 // allocate memory for a new local process descriptor … … 859 895 860 896 #if DEBUG_PROCESS_GET_LOCAL_COPY 861 uint32_t cycle = (uint32_t)hal_get_cycles();897 cycle = (uint32_t)hal_get_cycles(); 862 898 if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) 863 printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n",864 __FUNCTION__ , local_cxy , pid , process_ptr, cycle );899 printk("\n[DBG] %s : thread %x in process %x exit in cluster %x / process %x / cycle %d\n", 900 __FUNCTION__, this->trdid, this->process->pid, local_cxy, process_ptr, cycle ); 865 901 #endif 866 902 … … 883 919 884 920 // get pointers on parent process 885 parent_xp = (xptr_t)hal_remote_l wd( XPTR( process_cxy , &process_ptr->parent_xp ) );921 parent_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) ); 886 922 parent_cxy = GET_CXY( parent_xp ); 887 923 parent_ptr = GET_PTR( parent_xp ); 888 924 889 return hal_remote_l w( XPTR( parent_cxy , &parent_ptr->pid ) );925 return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) ); 890 926 } 891 927 … … 899 935 uint32_t fd; 900 936 901 remote_ spinlock_init( XPTR( local_cxy , &process->fd_array.lock ));937 remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY ); 902 938 903 939 process->fd_array.current = 0; … … 909 945 } 910 946 } 911 912 //////////////////////////////913 bool_t process_fd_array_full( void )914 {915 // get extended pointer on reference process916 xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;917 918 // get reference process cluster and local pointer919 process_t * ref_ptr = GET_PTR( ref_xp );920 cxy_t ref_cxy = GET_CXY( ref_xp );921 922 // get number of open file descriptors from reference fd_array923 uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );924 925 return ( current >= CONFIG_PROCESS_FILE_MAX_NR );926 }927 928 947 ///////////////////////////////////////////////// 929 948 error_t process_fd_register( process_t * process, … … 933 952 bool_t found; 934 953 uint32_t id; 954 uint32_t count; 935 955 xptr_t xp; 936 956 … … 941 961 942 962 // take lock protecting reference fd_array 943 remote_ spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );963 remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) ); 944 964 945 965 found = false; … … 947 967 for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ ) 948 968 { 949 xp = hal_remote_l
wd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );969 xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) ); 950 970 if ( xp == XPTR_NULL ) 951 971 { 972 // update reference fd_array 973 hal_remote_s64( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp ); 974 count = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ) + 1; 975 hal_remote_s32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , count ); 976 977 // update local fd_array copy if required 978 if( ref_cxy != local_cxy ) 979 { 980 process->fd_array.array[id] = file_xp; 981 process->fd_array.current = count; 982 } 983 984 // exit 985 *fdid = id; 952 986 found = true; 953 hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );954 hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );955 *fdid = id;956 987 break; 957 988 } … … 959 990 960 991 // release lock protecting reference fd_array 961 remote_ spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );992 remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) ); 962 993 963 994 if ( !found ) return -1; … … 970 1001 { 971 1002 xptr_t file_xp; 1003 xptr_t lock_xp; 972 1004 973 1005 // access local copy of process descriptor … … 981 1013 process_t * ref_ptr = GET_PTR( ref_xp ); 982 1014 1015 // build extended pointer on lock protecting reference fd_array 1016 lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock ); 1017 1018 // take lock protecting reference fd_array 1019 remote_queuelock_acquire( lock_xp ); 1020 983 1021 // access reference process descriptor 984 file_xp = hal_remote_l wd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );1022 file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) ); 985 1023 986 1024 // update local fd_array if found 987 if( file_xp != XPTR_NULL ) 988 {989 process->fd_array.array[fdid] = file_xp;990 } 1025 if( file_xp != XPTR_NULL ) process->fd_array.array[fdid] = file_xp; 1026 1027 // release lock protecting reference fd_array 1028 remote_queuelock_release( lock_xp ); 991 1029 } 992 1030 … … 1011 1049 1012 1050 // get the remote lock protecting the src fd_array 1013 remote_ spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );1051 remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) ); 1014 1052 1015 1053 // loop on all fd_array entries 1016 1054 for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ ) 1017 1055 { 1018 entry = (xptr_t)hal_remote_l wd( XPTR( src_cxy , &src_ptr->array[fd] ) );1056 entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) ); 1019 1057 1020 1058 if( entry != XPTR_NULL ) … … 1024 1062 1025 1063 // copy entry in destination process fd_array 1026 hal_remote_s wd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );1064 hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry ); 1027 1065 } 1028 1066 } 1029 1067 1030 1068 // release lock on source process fd_array 1031 remote_ spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );1069 remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) ); 1032 1070 1033 1071 } // end process_fd_remote_copy() 1072 1073 1074 //////////////////////////////////// 1075 bool_t process_fd_array_full( void ) 1076 { 1077 // get extended pointer on reference process 1078 xptr_t ref_xp = CURRENT_THREAD->process->ref_xp; 1079 1080 // get reference process cluster and local pointer 1081 process_t * ref_ptr = GET_PTR( ref_xp ); 1082 cxy_t ref_cxy = GET_CXY( ref_xp ); 1083 1084 // get number of open file descriptors from reference fd_array 1085 uint32_t current = 
hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) ); 1086 1087 return ( current >= CONFIG_PROCESS_FILE_MAX_NR ); 1088 } 1089 1034 1090 1035 1091 //////////////////////////////////////////////////////////////////////////////////// … … 1043 1099 { 1044 1100 ltid_t ltid; 1045 reg_t save_sr;1046 1101 bool_t found = false; 1047 1102 1048 1049 assert( (process != NULL) , "process argument is NULL" ); 1050 1051 assert( (thread != NULL) , "thread argument is NULL" ); 1052 1053 // take lock protecting th_tbl, depending on thread type: 1054 // we don't want to use a descheduling policy for idle thread initialisation 1055 if ( thread->type == THREAD_IDLE ) { 1056 spinlock_lock_busy( &process->th_lock , &save_sr ); 1057 } else { 1058 spinlock_lock( &process->th_lock ); 1059 } 1060 1061 // search a free slot in th_tbl[] 1062 for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ ) 1103 // check arguments 1104 assert( (process != NULL) , "process argument is NULL" ); 1105 assert( (thread != NULL) , "thread argument is NULL" ); 1106 1107 // get the lock protecting th_tbl for all threads 1108 // but the idle thread executing kernel_init (cannot yield) 1109 if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock ); 1110 1111 // scan th_tbl[] 1112 for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ ) 1063 1113 { 1064 1114 if( process->th_tbl[ltid] == NULL ) … … 1079 1129 } 1080 1130 1081 // release lock protecting th_tbl 1082 hal_fence(); 1083 if( thread->type == THREAD_IDLE ) { 1084 spinlock_unlock_busy( &process->th_lock , save_sr ); 1085 } else { 1086 spinlock_unlock( &process->th_lock ); 1087 } 1088 1089 return (found) ? 0 : ENOMEM; 1131 // release the lock protecting th_tbl for all threads 1132 // but the idle thread executing kernel_init (cannot yield) 1133 if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock ); 1134 1135 return (found) ?
0 : 0xFFFFFFFF; 1090 1136 1091 1137 } // end process_register_thread() … … 1096 1142 uint32_t count; // number of threads in local process descriptor 1097 1143 1098 assert( (thread != NULL) , "thread argument is NULL" ); 1144 // check argument 1145 assert( (thread != NULL) , "thread argument is NULL" ); 1099 1146 1100 1147 process_t * process = thread->process; … … 1102 1149 // get thread local index 1103 1150 ltid_t ltid = LTID_FROM_TRDID( thread->trdid ); 1104 1105 // take lock protecting th_tbl 1106 spinlock_lock( &process->th_lock ); 1107 1151 1152 // the lock depends on thread user/kernel type, because we cannot 1153 // use a descheduling policy for the lock protecting the th_tbl[] 1154 1155 // get the lock protecting th_tbl[] 1156 rwlock_wr_acquire( &process->th_lock ); 1157 1158 // get number of threads 1108 1159 count = process->th_nr; 1109 1160 1110 assert( (count > 0) , "process th_nr cannot be 0\n" ); 1161 // check th_nr value 1162 assert( (count > 0) , "process th_nr cannot be 0\n" ); 1111 1163 1112 1164 // remove thread from th_tbl[] … … 1114 1166 process->th_nr = count-1; 1115 1167 1116 // release lock protecting th_tbl 1117 hal_fence(); 1118 spinlock_unlock( &process->th_lock ); 1168 // release lock protecting th_tbl[] 1169 rwlock_wr_release( &process->th_lock ); 1119 1170 1120 1171 return (count == 1); … … 1141 1192 1142 1193 // get parent process PID and extended pointer on .elf file 1143 parent_pid = hal_remote_lw (XPTR( parent_process_cxy , &parent_process_ptr->pid)); 1144 vfs_bin_xp = hal_remote_lwd(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp)); 1145 1146 // check parent process is the reference process 1147 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 1148 1149 assert( (parent_process_xp == ref_xp ) , 1150 "parent process must be the reference process\n" ); 1194 parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid)); 1195 vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp)); 1196 1197 // get extended pointer on reference process 1198 ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 1199 1200 // check parent process is the reference process 1201 assert( (parent_process_xp == ref_xp ) , 1202 "parent process must be the reference process\n" ); 1151 1203 1152 1204 #if DEBUG_PROCESS_MAKE_FORK … … 1195 1247 #endif 1196 1248 1197 // give TXT ownership to child process1198 process_txt_set_ownership( XPTR( local_cxy , process ) );1199 1249 1200 1250 // copy VMM from parent descriptor to child descriptor … … 1218 1268 #endif 1219 1269 1220 // parent process gives TXT ownership to child process if required 1221 if( process_txt_is_owner(parent_process_xp) ) 1270 // if parent_process is INIT, or if parent_process is the TXT owner, 1271 // the child_process becomes the owner of its TXT terminal 1272 if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) ) 1222 1273 { 1223 1274 process_txt_set_ownership( XPTR( local_cxy , process ) ); … … 1226 1277 cycle = (uint32_t)hal_get_cycles(); 1227 1278 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1228 printk("\n[DBG] %s : thread %x in process %x gives TXT from parent %x to child %x / cycle %d\n", 1229 __FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1230 parent_pid, new_pid, cycle ); 1279 printk("\n[DBG] %s : thread %x in process %x / child takes TXT ownership / cycle %d\n", 1280 __FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle ); 1231
1281 #endif 1232 1282 … … 1249 1299 } 1250 1300 1251 // check main thread LTID 1252 assert( (LTID_FROM_TRDID(thread->trdid) == 0) , 1253 "main thread must have LTID == 0\n" ); 1254 1255 //#if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1256 #if DEBUG_PROCESS_MAKE_FORK 1301 // check main thread LTID 1302 assert( (LTID_FROM_TRDID(thread->trdid) == 0) , 1303 "main thread must have LTID == 0\n" ); 1304 1305 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1257 1306 cycle = (uint32_t)hal_get_cycles(); 1258 1307 if( DEBUG_PROCESS_MAKE_FORK < cycle ) … … 1289 1338 1290 1339 // register process in parent children list 1291 remote_ spinlock_lock( children_lock_xp );1340 remote_queuelock_acquire( children_lock_xp ); 1292 1341 xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) ); 1293 1342 hal_remote_atomic_add( children_nr_xp , 1 ); 1294 remote_ spinlock_unlock( children_lock_xp );1343 remote_queuelock_release( children_lock_xp ); 1295 1344 1296 1345 // return success … … 1341 1390 // open the file identified by <path> 1342 1391 file_xp = XPTR_NULL; 1343 file_id = -1;1392 file_id = 0xFFFFFFFF; 1344 1393 error = vfs_open( process, 1345 1394 path, … … 1442 1491 uint32_t cycle = (uint32_t)hal_get_cycles(); 1443 1492 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1444 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1493 printk("\n[DBG] %s : enter / cluster %x / cycle %d\n", 1494 __FUNCTION__, local_cxy, cycle ); 1445 1495 #endif 1446 1496 … … 1452 1502 process->term_state = 0; 1453 1503 1454 // reset th_tbl[] array as empty 1504 // reset th_tbl[] array and associated fields 1455 1505 uint32_t i; 1456 for( i = 0 ; i < CONFIG_THREAD _MAX_PER_CLUSTER ; i++ )1506 for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ ) 1457 1507 { 1458 1508 process->th_tbl[i] = NULL; 1459 1509 } 1460 1510 process->th_nr = 0; 1461 spinlock_init( &process->th_lock ); 1511 rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL ); 1512 1462 1513 1463 1514 // reset children list as empty 1464 1515 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 1465 remote_spinlock_init( XPTR( local_cxy , &process->children_lock ) );1466 1516 process->children_nr = 0; 1517 remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), 1518 LOCK_PROCESS_CHILDREN ); 1467 1519 1468 1520 hal_fence(); … … 1471 1523 cycle = (uint32_t)hal_get_cycles(); 1472 1524 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 1473 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1525 printk("\n[DBG] %s : exit / cluster %x / cycle %d\n", 1526 __FUNCTION__, local_cxy, cycle ); 1474 1527 #endif 1475 1528 1476 1529 } // end process_zero_init() 1477 1530 1478 ////////////////////////// 1531 //////////////////////////////// 1479 1532 void process_init_create( void ) 1480 1533 { … … 1498 1551 process = process_alloc(); 1499 1552 1500 assert( (process != NULL), 1501 "no memory for process descriptor in cluster %x\n", local_cxy ); 1553 // check memory allocator 1554 assert( (process != NULL), 1555 "no memory for process descriptor in cluster %x\n", local_cxy ); 1502 1556 1503 1557 // get PID from local cluster 1504 1558 error = cluster_pid_alloc( process , &pid ); 1505 1559 1506 assert( (error == 0), 1507 "cannot allocate PID in cluster %x\n", local_cxy ); 1508 1509 assert( (pid == 1) , 1510 "process INIT must be first process in cluster 0\n" ); 1560 // check PID allocator 1561 assert( (error == 0), 1562 "cannot allocate PID in cluster %x\n", local_cxy ); 1563 1564 // check PID value 1565
assert( (pid == 1) , 1566 "process INIT must be first process in cluster 0\n" ); 1511 1567 1512 1568 // initialize process descriptor / parent is local process_zero … … 1514 1570 pid, 1515 1571 XPTR( local_cxy , &process_zero ) ); 1572 1573 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1574 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1575 printk("\n[DBG] %s : thread %x in process %x initialized process descriptor\n", 1576 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 1577 #endif 1516 1578 1517 1579 // open the file identified by CONFIG_PROCESS_INIT_PATH … … 1525 1587 &file_id ); 1526 1588 1527 assert( (error == 0), 1528 "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1589 assert( (error == 0), 1590 "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1591 1592 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1593 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1594 printk("\n[DBG] %s : thread %x in process %x opened .elf file descriptor\n", 1595 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 1596 #endif 1597 1530 // register "code" and "data" vsegs as well as entry-point 1598 // register "code" and "data" vsegs as well as entry-point 1531 1599 // in process VMM, using information contained in the elf file. 1532 1600 error = elf_load_process( file_xp , process ); 1533 1601 1534 assert( (error == 0), 1535 "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1602 assert( (error == 0), 1603 "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1604 1605 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1606 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1607 printk("\n[DBG] %s : thread %x in process %x registered code/data vsegs in VMM\n", 1608 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 1609 #endif 1536 1610 1537 1611 // get extended pointers on process_zero children_root, children_lock … … 1539 1613 xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock ); 1540 1614 1615 // take lock protecting kernel process children list 1616 remote_queuelock_acquire( children_lock_xp ); 1617 1541 1618 // register process INIT in parent local process_zero 1542 remote_spinlock_lock( children_lock_xp );1543 1619 xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) ); 1544 1620 hal_atomic_add( &process_zero.children_nr , 1 ); 1545 remote_spinlock_unlock( children_lock_xp ); 1621 1622 // release lock protecting kernel process children list 1623 remote_queuelock_release( children_lock_xp ); 1624 1625 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1626 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1627 printk("\n[DBG] %s : thread %x in process %x registered init process in parent\n", 1628 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 1629 #endif 1547 1631 1548 1632 // select a core in local cluster to execute the main thread … … 1560 1644 &thread ); 1561 1645 1562 assert( (error == 0), 1563 "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1564 1565 assert( (thread->trdid == 0), 1566 "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1646 assert( (error == 0), 1647 "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1648 1649 assert( (thread->trdid == 0), 1650 "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH ); 1651 1652 #if(DEBUG_PROCESS_INIT_CREATE & 1) 1653 if( DEBUG_PROCESS_INIT_CREATE < cycle ) 1654 printk("\n[DBG] %s : thread %x in process %x created main thread\n", 1655 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
1656 #endif 1567 1657 1568 1658 // activate thread … … 1618 1708 1619 1709 // get PID and state 1620 pid = hal_remote_l w( XPTR( process_cxy , &process_ptr->pid ) );1621 state = hal_remote_l w( XPTR( process_cxy , &process_ptr->term_state ) );1710 pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 1711 state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) ); 1622 1712 1623 1713 // get PPID 1624 parent_xp = hal_remote_l wd( XPTR( process_cxy , &process_ptr->parent_xp ) );1714 parent_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) ); 1625 1715 parent_cxy = GET_CXY( parent_xp ); 1626 1716 parent_ptr = GET_PTR( parent_xp ); 1627 ppid = hal_remote_l w( XPTR( parent_cxy , &parent_ptr->pid ) );1717 ppid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) ); 1628 1718 1629 1719 // get number of threads 1630 th_nr = hal_remote_l w( XPTR( process_cxy , &process_ptr->th_nr ) );1720 th_nr = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) ); 1631 1721 1632 1722 // get pointers on owner process descriptor 1633 owner_xp = hal_remote_l wd( XPTR( process_cxy , &process_ptr->owner_xp ) );1723 owner_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ); 1634 1724 owner_cxy = GET_CXY( owner_xp ); 1635 1725 owner_ptr = GET_PTR( owner_xp ); 1636 1726 1637 1727 // get extended pointer on TXT_RX file descriptor attached to process 1638 txt_file_xp = hal_remote_l wd( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );1728 txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) ); 1639 1729 1640 1730 assert( (txt_file_xp != XPTR_NULL) , … … 1650 1740 XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) ); 1651 1741 1652 txt_owner_xp = (xptr_t)hal_remote_l wd( XPTR( txt_chdev_cxy,1742 txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 1653 1743 &txt_chdev_ptr->ext.txt.owner_xp ) ); 1654 1744 1655 1745 // get process .elf name 1656 elf_file_xp = hal_remote_l wd( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );1746 elf_file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) ); 1657 1747 elf_file_cxy = GET_CXY( elf_file_xp ); 1658 1748 elf_file_ptr = (vfs_file_t *)GET_PTR( elf_file_xp ); … … 1718 1808 xptr_t lock_xp; // extended pointer on list lock in chdev 1719 1809 1720 1721 1722 1723 1724 1725 1726 1810 // check process is in owner cluster 1811 assert( (CXY_FROM_PID( process->pid ) == local_cxy) , 1812 "process descriptor not in owner cluster" ); 1813 1814 // check terminal index 1815 assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) , 1816 "illegal TXT terminal index" ); 1727 1817 1728 1818 // get pointers on TXT_RX[txt_id] chdev … … 1735 1825 lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock ); 1736 1826 1827 // get lock protecting list of processes attached to TXT 1828 remote_busylock_acquire( lock_xp ); 1829 1737 1830 // insert process in attached process list 1738 remote_spinlock_lock( lock_xp );1739 1831 xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) ); 1740 remote_spinlock_unlock( lock_xp ); 1832 1833 // release lock protecting list of processes attached to TXT 1834 remote_busylock_release( lock_xp ); 1741 1835 1742 1836 #if DEBUG_PROCESS_TXT … … 1765 1859 process_cxy = GET_CXY( process_xp ); 1766 1860 process_ptr = GET_PTR( process_xp ); 1767 1768 // check process descriptor in owner cluster 1769 process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) ); 1770 1771 1861 process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 1862 1863 // 
check process descriptor in owner cluster 1864 assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , 1865 "process descriptor not in owner cluster" ); 1772 1866 1773 1867 // release TXT ownership (does nothing if not TXT owner) … … 1775 1869 1776 1870 // get extended pointer on process stdin file 1777 file_xp = (xptr_t)hal_remote_l wd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );1871 file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); 1778 1872 1779 1873 // get pointers on TXT_RX chdev … … 1785 1879 lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock ); 1786 1880 1881 // get lock protecting list of processes attached to TXT 1882 remote_busylock_acquire( lock_xp ); 1883 1787 1884 // unlink process from attached process list 1788 remote_spinlock_lock( lock_xp );1789 1885 xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) ); 1790 remote_spinlock_unlock( lock_xp ); 1886 1887 // release lock protecting list of processes attached to TXT 1888 remote_busylock_release( lock_xp ); 1791 1889 1792 1890 #if DEBUG_PROCESS_TXT 1793 1891 uint32_t cycle = (uint32_t)hal_get_cycles(); 1794 uint32_t txt_id = hal_remote_l w( XPTR( chdev_cxy , &chdev_ptr->channel ) );1892 uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) ); 1795 1893 if( DEBUG_PROCESS_TXT < cycle ) 1796 1894 printk("\n[DBG] %s : thread %x in process %x detached process %x from TXT %d / cycle %d\n", … … 1815 1913 process_cxy = GET_CXY( process_xp ); 1816 1914 process_ptr = GET_PTR( process_xp ); 1817 process_pid = hal_remote_l w( XPTR( process_cxy , &process_ptr->pid ) );1915 process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 1818 1916 1819 1917 // check owner cluster … … 1822 1920 1823 1921 // get extended pointer on stdin pseudo file 1824 file_xp = hal_remote_l wd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );1922 file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); 1825 1923 1826 1924 // get pointers on TXT chdev … … 1830 1928 1831 1929 // set owner field in TXT chdev 1832 hal_remote_s wd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );1930 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp ); 1833 1931 1834 1932 #if DEBUG_PROCESS_TXT 1835 1933 uint32_t cycle = (uint32_t)hal_get_cycles(); 1836 uint32_t txt_id = hal_remote_l w( XPTR( txt_cxy , &txt_ptr->channel ) );1934 uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) ); 1837 1935 if( DEBUG_PROCESS_TXT < cycle ) 1838 1936 printk("\n[DBG] %s : thread %x in process %x give TXT %d to process %x / cycle %d\n", … … 1868 1966 process_cxy = GET_CXY( process_xp ); 1869 1967 process_ptr = GET_PTR( process_xp ); 1870 process_pid = hal_remote_l w( XPTR( process_cxy , &process_ptr->pid ) );1968 process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 1871 1969 1872 1970 // check owner cluster … … 1875 1973 1876 1974 // get extended pointer on stdin pseudo file 1877 file_xp = hal_remote_l wd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );1975 file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); 1878 1976 1879 1977 // get pointers on TXT chdev … … 1883 1981 1884 1982 // get extended pointer on TXT_RX owner and TXT channel 1885 owner_xp = hal_remote_l wd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );1886 txt_id = hal_remote_l w( XPTR( txt_cxy , &txt_ptr->channel ) );1983 owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) ); 1984 
txt_id = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) ); 1887 1985 1888 1986 // transfer ownership only if process is the TXT owner … … 1894 1992 1895 1993 // get lock 1896 remote_ spinlock_lock( lock_xp );1994 remote_busylock_acquire( lock_xp ); 1897 1995 1898 1996 if( process_get_ppid( process_xp ) != 1 ) // process is not KSH … … 1908 2006 { 1909 2007 // release lock 1910 remote_ spinlock_unlock( lock_xp );2008 remote_busylock_release( lock_xp ); 1911 2009 1912 2010 // set owner field in TXT chdev 1913 hal_remote_s wd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );2011 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 1914 2012 1915 2013 #if DEBUG_PROCESS_TXT 1916 2014 cycle = (uint32_t)hal_get_cycles(); 1917 uint32_t ksh_pid = hal_remote_l w( XPTR( current_cxy , &current_ptr->pid ) );2015 uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) ); 1918 2016 if( DEBUG_PROCESS_TXT < cycle ) 1919 2017 printk("\n[DBG] %s : thread %x in process %x release TXT %d to KSH %x / cycle %d\n", … … 1926 2024 1927 2025 // release lock 1928 remote_ spinlock_unlock( lock_xp );2026 remote_busylock_release( lock_xp ); 1929 2027 1930 2028 // PANIC if KSH not found … … 1945 2043 { 1946 2044 // release lock 1947 remote_ spinlock_unlock( lock_xp );2045 remote_busylock_release( lock_xp ); 1948 2046 1949 2047 // set owner field in TXT chdev 1950 hal_remote_s wd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );2048 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 1951 2049 1952 2050 #if DEBUG_PROCESS_TXT 1953 2051 cycle = (uint32_t)hal_get_cycles(); 1954 uint32_t new_pid = hal_remote_l w( XPTR( current_cxy , &current_ptr->pid ) );2052 uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) ); 1955 2053 if( DEBUG_PROCESS_TXT < cycle ) 1956 2054 printk("\n[DBG] %s : thread %x in process %x release TXT %d to process %x / cycle %d\n", … … 1963 2061 1964 2062 // release lock 1965 remote_ spinlock_unlock( lock_xp );2063 remote_busylock_release( lock_xp ); 1966 2064 1967 2065 // no more owner for TXT if no other process found 1968 hal_remote_s wd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );2066 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL ); 1969 2067 1970 2068 #if DEBUG_PROCESS_TXT … … 1993 2091 1994 2092 1995 //////////////////////////////////////////////// //1996 2093 uint32_t process_txt_is_owner( xptr_t process_xp )2094 //////////////////////////////////////////////// 2094 bool_t process_txt_is_owner( xptr_t process_xp ) 1997 2095 { 1998 2096 // get local pointer and cluster of process in owner cluster … … 2000 2098 process_t * process_ptr = GET_PTR( process_xp ); 2001 2099 2002 // check owner cluster 2003 pid_t process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );2004 2100 // check calling thread executes in target process owner cluster 2101 pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 2102 assert( (process_cxy == CXY_FROM_PID( process_pid )) , 2103 "process descriptor not in owner cluster\n" ); 2006 2104 2007 2105 // get extended pointer on stdin pseudo file 2008 2106 xptr_t file_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); 2009 2107 2010 2108 // get pointers on TXT chdev … … 2014 2112 2015 2113 // get extended pointer on TXT_RX owner process 2016 xptr_t owner_xp = hal_remote_lwd( XPTR( txt_cxy ,
&txt_ptr->ext.txt.owner_xp ) );2114 xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) ); 2017 2115 2018 2116 return (process_xp == owner_xp); … … 2027 2125 chdev_t * txt_rx_ptr = GET_PTR( txt_rx_xp ); 2028 2126 2029 return (xptr_t)hal_remote_l wd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );2127 return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) ); 2030 2128 2031 2129 } // end process_txt_get_owner() … … 2045 2143 xptr_t txt0_xp; 2046 2144 xptr_t txt0_lock_xp; 2047 reg_t txt0_save_sr; // save SR to take TXT0 lock in busy mode2048 2145 2049 2146 assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) , … … 2068 2165 2069 2166 // get lock on attached process list 2070 remote_ spinlock_lock( lock_xp );2167 remote_busylock_acquire( lock_xp ); 2071 2168 2072 2169 // get TXT0 lock in busy waiting mode 2073 remote_ spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr);2170 remote_busylock_acquire( txt0_lock_xp ); 2074 2171 2075 2172 // display header … … 2085 2182 2086 2183 // release TXT0 lock in busy waiting mode 2087 remote_ spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr);2184 remote_busylock_release( txt0_lock_xp ); 2088 2185 2089 2186 // release lock on attached process list 2090 remote_ spinlock_unlock( lock_xp );2187 remote_busylock_release( lock_xp ); 2091 2188 2092 2189 } // end process_txt_display -
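To condense the locking discipline that this changeset introduces throughout process.c: every shared structure now uses a typed lock (remote_queuelock for cross-cluster lists, remote_busylock for chdev state, rwlock for th_tbl[]), always taken and released around the shortest possible critical section. The C sketch below illustrates the child-registration pattern used by process_make_fork() and process_init_create() above; all primitives are those appearing in this diff, but the helper function itself is hypothetical and not part of the commit.

// Minimal sketch (not part of this changeset) of the child-registration
// pattern used above: take the remote_queuelock protecting the parent
// children list, link the child, update the counter, release the lock.
static void example_register_child( xptr_t      parent_xp,   // parent process
                                    process_t * child )      // local child copy
{
    cxy_t       parent_cxy = GET_CXY( parent_xp );
    process_t * parent_ptr = GET_PTR( parent_xp );

    // build extended pointers on the parent children list root, lock, counter
    xptr_t root_xp = XPTR( parent_cxy , &parent_ptr->children_root );
    xptr_t lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
    xptr_t nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

    // take the queuelock protecting the parent children list
    remote_queuelock_acquire( lock_xp );

    // register the child and update the children counter
    xlist_add_last( root_xp , XPTR( local_cxy , &child->children_list ) );
    hal_remote_atomic_add( nr_xp , 1 );

    // release the queuelock protecting the parent children list
    remote_queuelock_release( lock_xp );
}

The same acquire / update / release shape appears in process_destroy(), process_txt_attach() and process_txt_detach() above, only with a different lock type when descheduling is not allowed.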
trunk/kernel/kern/process.h
r527 r564 1 1 /* 2 * process.h - process related management functions2 * process.h - process related functions definition. 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) … … 33 33 #include <xlist.h> 34 34 #include <bits.h> 35 #include <spinlock.h> 35 #include <busylock.h> 36 #include <queuelock.h> 37 #include <remote_queuelock.h> 38 #include <remote_rwlock.h> 36 39 #include <hal_atomic.h> 37 40 #include <vmm.h> … … 69 72 * A free entry in this array contains the XPTR_NULL value. 70 73 * The array size is defined by a the CONFIG_PROCESS_FILE_MAX_NR parameter. 71 * All modifications (open/close) in this structure must be done by the reference cluster, 72 * and reported in process copies. 74 * 75 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open 76 * files, and is protected by the lock against concurrent access. 77 * - the fd_array[] in a process copy is simply a cache containing a subset of the 78 * open files to speed the fdid to xptr translation, but the "lock" and "current 79 * fields should not be used. 80 * - all modifications made by the process_fd_remove() are done in reference cluster 81 * and reported in all process_copies. 73 82 ********************************************************************************************/ 74 83 75 84 typedef struct fd_array_s 76 85 { 77 remote_ spinlock_t lock;/*! lock protecting fd_array */78 uint32_t current;/*! current number of open files */79 xptr_t array[CONFIG_PROCESS_FILE_MAX_NR]; /*! xptr on open file descriptors*/86 remote_queuelock_t lock; /*! lock protecting fd_array */ 87 uint32_t current; /*! current number of open files */ 88 xptr_t array[CONFIG_PROCESS_FILE_MAX_NR]; /*! open file descriptors */ 80 89 } 81 90 fd_array_t; … … 100 109 * complete in the reference process cluster, other copies are read-only caches. 101 110 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated 102 111 * <sync_lock>, that are dynamically allocated, are only defined in the reference cluster. 112 * <sync_lock>, dynamically allocated, are only defined in the reference cluster. 103 113 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only 104 114 * defined in the reference cluster, and are undefined in other clusters. 105 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, < th_lock> fields106 * are defined in all process descriptors copies.115 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields 116 * are specific in each cluster, and are defined in all process descriptors copies. 107 117 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster. 108 * The term_state format is defined in the shared_syscalls.h file.117 118 * (The term_state format is defined in the shared_syscalls.h file ). 109 119 ********************************************************************************************* 110 120 111 121 typedef struct process_s 112 122 { 113 vmm_t vmm; /*! embedded virtual memory manager */ 114 115 fd_array_t fd_array; /*! embedded open file descriptors array */ 116 117 xptr_t vfs_root_xp; /*! extended pointer on current VFS root inode */ 118 xptr_t vfs_bin_xp; /*! extended pointer on .elf file descriptor */ 119 pid_t pid; /*! process identifier */ 120 xptr_t ref_xp; /*! extended pointer on reference process */ 121 xptr_t owner_xp; /*! extended pointer on owner process */ 122 xptr_t parent_xp; /*! extended pointer on parent process */ 123 124 xptr_t vfs_cwd_xp; /*!
extended pointer on current working dir inode */ 125 remote_rwlock_t cwd_lock; /*! lock protecting working directory changes */ 126 127 xlist_entry_t children_root; /*! root of the children process xlist */ 128 remote_spinlock_t children_lock; /*! lock protecting children process xlist */ 129 uint32_t children_nr; /*! number of children processes */ 130 131 xlist_entry_t children_list; /*! member of list of children of same parent */ 132 xlist_entry_t local_list; /*! member of list of process in same cluster */ 133 xlist_entry_t copies_list; /*! member of list of copies of same process */ 134 xlist_entry_t txt_list; /*! member of list of processes sharing same TXT */ 135 136 spinlock_t th_lock; /*! lock protecting th_tbl[] concurrent access */ 137 uint32_t th_nr; /*! number of threads in this cluster */ 138 139 struct thread_s * th_tbl[CONFIG_THREAD_MAX_PER_CLUSTER]; /*! pointers on local threads */ 140 141 xlist_entry_t sem_root; /*! root of the process semaphore list */ 142 xlist_entry_t mutex_root; /*! root of the process mutex list */ 143 xlist_entry_t barrier_root; /*! root of the process barrier list */ 144 xlist_entry_t condvar_root; /*! root of the process condvar list */ 145 remote_spinlock_t sync_lock; /*! lock protecting sem,mutex,barrier,condvar lists */ 146 147 uint32_t term_state; /*! termination status (flags & exit status) */ 122 vmm_t vmm; /*! embedded virtual memory manager */ 123 124 fd_array_t fd_array; /*! embedded open file descriptors array */ 125 126 xptr_t vfs_root_xp; /*! extended pointer on current VFS root inode */ 127 xptr_t vfs_bin_xp; /*! extended pointer on .elf file descriptor */ 128 pid_t pid; /*! process identifier */ 129 xptr_t ref_xp; /*! extended pointer on reference process */ 130 xptr_t owner_xp; /*! extended pointer on owner process */ 131 xptr_t parent_xp; /*! extended pointer on parent process */ 132 133 xptr_t vfs_cwd_xp; /*! extended pointer on current working dir inode */ 134 remote_rwlock_t cwd_lock; /*! lock protecting working directory changes */ 135 136 xlist_entry_t children_root; /*! root of the children process xlist */ 137 remote_queuelock_t children_lock; /*! lock protecting children process xlist */ 138 uint32_t children_nr; /*! number of children processes */ 139 140 xlist_entry_t children_list; /*! member of list of children of same parent */ 141 xlist_entry_t local_list; /*! member of list of process in same cluster */ 142 xlist_entry_t copies_list; /*! member of list of copies of same process */ 143 xlist_entry_t txt_list; /*! member of list of processes sharing same TXT */ 144 145 struct thread_s * th_tbl[CONFIG_THREADS_MAX_PER_CLUSTER]; /*! local threads */ 146 uint32_t th_nr; /*! number of threads in this cluster */ 147 rwlock_t th_lock; /*! lock protecting th_tbl[] */ 148 149 xlist_entry_t sem_root; /*! root of the user defined semaphore list */ 150 xlist_entry_t mutex_root; /*! root of the user defined mutex list */ 151 xlist_entry_t barrier_root; /*! root of the user defined barrier list */ 152 xlist_entry_t condvar_root; /*! root of the user defined condvar list */ 153 remote_queuelock_t sync_lock; /*! lock protecting user defined synchro lists */ 154 155 uint32_t term_state; /*! termination status (flags & exit status) */ 148 156 } 149 157 process_t; … … 210 218 211 219 /*********************************************************************************************
The <process> and <pid> arguments must214 * be previously allocated by the caller. This function can be called by two functions:220 * This function initializes a reference, user process descriptor from another process 221 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments 222 * are previously allocated by the caller. This function can be called by two functions: 215 223 * 1) process_init_create() : process is the INIT process; parent is process-zero. 216 224 * 2) process_make_fork() : the parent process descriptor is generally remote. … … 411 419 412 420 /********************************************************************************************* 413 * This function uses as many remote accesses as required, to reset an entry in fd_array[],414 * in all clusters containing a copy. The entry is identified by the <fdid> argument.415 * This function must be executed by a thread running reference cluster, that contains416 * the complete list of process descriptors copies.417 *********************************************************************************************418 * @ process : pointer on the local process descriptor.419 * @ fdid : file descriptor index in the fd_array.420 ********************************************************************************************/421 void process_fd_remove( process_t * process,422 uint32_t fdid );423 424 /*********************************************************************************************425 * This function returns an extended pointer on a file descriptor identified by its index426 * in fd_array. It can be called by any thread running in any cluster.427 * It accesses first the local process descriptor. In case of local miss, it uses remote428 * access to access the reference process descriptor.429 * It updates the local fd_array when the file descriptor exists in reference cluster.430 * The file descriptor refcount is not incremented.431 *********************************************************************************************432 * @ process : pointer on the local process descriptor.433 * @ fdid : file descriptor index in the fd_array.434 * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.435 ********************************************************************************************/436 xptr_t process_fd_get_xptr( process_t * process,437 uint32_t fdid );438 439 /*********************************************************************************************440 * This function checks the number of open files for a given process.441 * It can be called by any thread in any cluster, because it uses portable remote access442 * primitives to access the reference process descriptor.443 *********************************************************************************************444 * @ returns true if file descriptor array full.445 ********************************************************************************************/446 bool_t process_fd_array_full( void );447 448 /*********************************************************************************************449 421 * This function allocates a free slot in the fd_array of the reference process, 450 422 * register the <file_xp> argument in the allocated slot, and return the slot index. 451 423 * It can be called by any thread in any cluster, because it uses portable remote access 452 424 * primitives to access the reference process descriptor. 
425 * It takes the lock protecting the reference fd_array against concurrent accesses. 453 426 ********************************************************************************************* 454 427 * @ file_xp : extended pointer on the file descriptor to be registered. … … 459 432 xptr_t file_xp, 460 433 uint32_t * fdid ); 434 435 /********************************************************************************************* 436 * This function uses as many remote accesses as required, to reset an entry in fd_array[], 437 * in all clusters containing a copy. The entry is identified by the <fdid> argument. 438 * This function must be executed by a thread running in reference cluster, that contains 439 * the complete list of process descriptors copies. 440 * It takes the lock protecting the reference fd_array against concurrent accesses. 441 * TODO this function is not implemented yet. 442 ********************************************************************************************* 443 * @ process : pointer on the local process descriptor. 444 * @ fdid : file descriptor index in the fd_array. 445 ********************************************************************************************/ 446 void process_fd_remove( process_t * process, 447 uint32_t fdid ); 448 449 /********************************************************************************************* 450 * This function returns an extended pointer on a file descriptor identified by its index 451 * in fd_array. It can be called by any thread running in any cluster. 452 * It accesses first the local process descriptor. In case of local miss, it takes 453 * the lock protecting the reference fd_array, and access the reference process descriptor. 454 * It updates the local fd_array when the file descriptor exists in reference cluster. 455 * It takes the lock protecting the reference fd_array against concurrent accesses. 456 * The file descriptor refcount is not incremented. 457 ********************************************************************************************* 458 * @ process : pointer on the local process descriptor. 459 * @ fdid : file descriptor index in the fd_array. 460 * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found. 461 ********************************************************************************************/ 462 xptr_t process_fd_get_xptr( process_t * process, 463 uint32_t fdid ); 461 464 462 465 /********************************************************************************************* … … 465 468 * <dst_xp> fd_array, embedded in another process descriptor. 466 469 * The calling thread can be running in any cluster. 467 * It takes the remote lock protecting the <src_xp> fd_array during the copy.470 * It takes the lock protecting the reference fd_array against concurrent accesses. 468 471 * For each involved file descriptor, the refcount is incremented. 469 472 ********************************************************************************************* … … 474 477 xptr_t src_xp ); 475 478 479 /********************************************************************************************* 480 * This function checks the current number of open files for a given process. 481 * It can be called by any thread in any cluster, because it uses portable remote access 482 * primitives to access the reference process descriptor. 483 * It does not take the lock protecting the reference fd_array. 
484 ********************************************************************************************* 485 * @ returns true if file descriptor array full. 486 ********************************************************************************************/ 487 bool_t process_fd_array_full( void ); 488 476 489 477 490 … … 479 492 480 493 /********************************************************************************************* 481 * This function registers a new thread in the local process descriptor. 482 * It checks that there is an available slot in the local th_tbl[] array, 483 * allocates a new LTID, and registers the new thread in the th_tbl[]. 484 * It takes the lock protecting exclusive access to the th_tbl[]. 494 * This function atomically registers a new thread in the local process descriptor. 495 * It checks that there is an available slot in the local th_tbl[] array, and allocates 496 * a new LTID using the relevant lock depending on the kernel/user type. 485 497 ********************************************************************************************* 486 498 * @ process : pointer on the local process descriptor. … … 494 506 495 507 /********************************************************************************************* 496 * This function removes a thread registration from the local process descriptor.497 * It takes the lock protecting exclusive access to the th_tbl[].508 * This function atomically removes a thread registration from the local process descriptor 509 * th_tbl[] array, using the relevant lock, depending on the kernel/user type. 498 510 ********************************************************************************************* 499 511 * @ thread : local pointer on thread to be removed. … … 541 553 542 554 /********************************************************************************************* 543 * This function gives the TXT ownership to a process identified by the <process_xp> argument. 555 * This function gives a process identified by the <process_xp> argument the exclusive 556 * ownership of its attached TXT_RX terminal (i.e. put the process in foreground). 544 557 * It can be called by a thread running in any cluster, but the <process_xp> must be the 545 558 * owner cluster process descriptor. … … 568 581 * process_xp must be the owner cluster process descriptor. 569 582 ********************************************************************************************* 570 * @ return a non-zero value if target process is TXT owner.571 ********************************************************************************************/ 572 uint32_t process_txt_is_owner( xptr_t process_xp );583 * @ returns true if target process is TXT owner. 584 ********************************************************************************************/ 585 bool_t process_txt_is_owner( xptr_t process_xp ); 573 586 574 587 /********************************************************************************************* -
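The expected call sequence for the fd_array interface specified in this header can be sketched as follows. The three calls are exactly those declared above; the helper function, its error convention, and the open()-like calling context are assumptions made for illustration only, not part of the commit.

// Hedged usage sketch (not in this changeset) for the fd_array API:
// check for a full array, register an open file in the reference
// process, then translate the returned fdid back to an extended pointer.
static error_t example_fd_usage( process_t * process,    // local process copy
                                 xptr_t      file_xp )   // open file descriptor
{
    uint32_t fdid;

    // reject early when the reference fd_array is already full
    if( process_fd_array_full() ) return -1;

    // allocate a slot in the reference fd_array and register the file
    if( process_fd_register( process , file_xp , &fdid ) ) return -1;

    // any thread can later translate fdid back to the extended pointer;
    // a local miss falls back to the reference cluster and updates the
    // local fd_array cache, as described in the comments above
    xptr_t xp = process_fd_get_xptr( process , fdid );

    return (xp == XPTR_NULL) ? -1 : 0;
}

Note that, per the specification above, only process_fd_register() and process_fd_get_xptr() take the reference fd_array lock; process_fd_array_full() reads the current counter without locking, so the subsequent registration can still fail under concurrency.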
trunk/kernel/kern/rpc.c
r503 r564 43 43 44 44 ///////////////////////////////////////////////////////////////////////////////////////// 45 // array of function pointers (must be consistent with enum in rpc.h) 45 // Array of function pointers and array of printable strings. 46 // These arrays must be kept consistent with enum in rpc.h file. 46 47 ///////////////////////////////////////////////////////////////////////////////////////// 47 48 … … 82 83 }; 83 84 84 ////////////////////////////////////////////// 85 char * rpc_str[RPC_MAX_INDEX] = 86 { 87 "PMEM_GET_PAGES", // 0 88 "PMEM_RELEASE_PAGES", // 1 89 "undefined", // 2 90 "PROCESS_MAKE_FORK", // 3 91 "undefined", // 4 92 "undefined", // 5 93 "THREAD_USER_CREATE", // 6 94 "THREAD_KERNEL_CREATE", // 7 95 "undefined", // 8 96 "PROCESS_SIGACTION", // 9 97 98 "VFS_INODE_CREATE", // 10 99 "VFS_INODE_DESTROY", // 11 100 "VFS_DENTRY_CREATE", // 12 101 "VFS_DENTRY_DESTROY", // 13 102 "VFS_FILE_CREATE", // 14 103 "VFS_FILE_DESTROY", // 15 104 "VFS_INODE_LOAD", // 16 105 "VFS_MAPPER_LOAD_ALL", // 17 106 "FATFS_GET_CLUSTER", // 18 107 "undefined", // 19 108 109 "GET_VSEG", // 20 110 "GET_PTE", // 21 111 "KCM_ALLOC", // 22 112 "KCM_FREE", // 23 113 "MAPPER_MOVE_BUFFER", // 24 114 "MAPPER_GET_PAGE", // 25 115 "VMM_CREATE_VSEG", // 26 116 "undefined", // 27 117 "VMM_SET_COW", // 28 118 "VMM_DISPLAY", // 29 119 }; 120 121 ////////////////////////////////////////////////////////////////////////////////// 85 122 void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) ) 86 123 { … … 105 142 client_core_lid = this->core->lid; 106 143 144 // check calling thread can yield when it is not the idle thread 145 assert( (this->busylocks == 0) || (this->type == THREAD_IDLE), 146 "cannot yield : busylocks = %d\n", this->busylocks ); 147 107 148 #if DEBUG_RPC_CLIENT_GENERIC 108 149 uint32_t cycle = (uint32_t)hal_get_cycles(); 109 150 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 110 printk("\n[DBG] %s : thread %x in process %x enter for rpc [%d]/ cycle %d\n",111 __FUNCTION__, this->trdid, this->process->pid, rpc ->index, cycle );151 printk("\n[DBG] %s : thread %x in process %x enter for rpc %s / server_cxy %x / cycle %d\n", 152 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], server_cxy, cycle ); 112 153 #endif 113 154 114 155 // select a server_core : use client core index if possible / core 0 otherwise 115 if( client_core_lid < hal_remote_l w( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) )156 if( client_core_lid < hal_remote_l32( XPTR( server_cxy , &LOCAL_CLUSTER->cores_nr ) ) ) 116 157 { 117 158 server_core_lid = client_core_lid; … … 130 171 131 172 // get local pointer on rpc_fifo in remote cluster, 132 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 133 134 // post RPC in remote fifo / deschedule and retry if fifo full 173 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 174 xptr_t rpc_fifo_xp = XPTR( server_cxy , rpc_fifo ); 175 176 // post RPC in remote fifo / deschedule without blocking if fifo full 135 177 do 136 178 { 137 full = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), (uint64_t )desc_xp ); 179 full = remote_fifo_put_item( rpc_fifo_xp , (uint64_t )desc_xp ); 180 138 181 if ( full ) 139 182 { … … 151 194 #if DEBUG_RPC_CLIENT_GENERIC 152 195 cycle = (uint32_t)hal_get_cycles(); 196 uint32_t items = remote_fifo_items( rpc_fifo_xp ); 153 197 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 154 printk("\n[DBG] %s : thread %x in process %x / rpc [%d] / rpc_ptr %x/ cycle %d\n",155 __FUNCTION__, 
this->trdid, this->process->pid, rpc ->index, rpc, cycle );198 199 printk("\n[DBG] %s : thread %x in process %x / rpc %s / items %d / cycle %d\n", 200 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], items, cycle ); 156 201 #endif 157 202 … … 159 203 // send IPI to core running the server thread 160 204 dev_pic_send_ipi( server_cxy , server_core_lid ); 161 205 162 // wait RPC completion before returning if blocking RPC 163 // - busy waiting policy during kernel_init, or if threads cannot yield164 // - block and deschedule in all other cases206 // wait RPC completion before returning if blocking RPC : 207 // - descheduling without blocking if thread idle (in kernel init) 208 // - block and deschedule policy for any other thread 164 208 if ( rpc->blocking ) 165 209 { 166 if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting210 if( this->type == THREAD_IDLE ) // deschedule without blocking policy 167 211 { 168 212 169 213 #if DEBUG_RPC_CLIENT_GENERIC 170 214 cycle = (uint32_t)hal_get_cycles(); 171 215 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 172 printk("\n[DBG] %s : thread %x in process %x busy waiting for rpc[%d]/ cycle %d\n",173 __FUNCTION__, this->trdid, this->process->pid, rpc ->index, cycle );216 printk("\n[DBG] %s : thread %x in process %x enter waiting loop for rpc %s / cycle %d\n", 217 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle ); 218 #endif 219 176 while( rpc->responses ) hal_fixed_delay( 100);220 while( rpc->responses ) sched_yield( "busy waiting on RPC"); 177 221 178 222 #if DEBUG_RPC_CLIENT_GENERIC 179 223 cycle = (uint32_t)hal_get_cycles(); 180 224 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 181 printk("\n[DBG] %s : thread %x in process %x resumes for rpc[%d] / cycle %d\n", 182 __FUNCTION__, this->trdid, this->process->pid, rpc->index, cycle ); 183 #endif 184 } 185 else // block & deschedule 225 printk("\n[DBG] %s : thread %x in process %x received response for rpc %s / cycle %d\n", 226 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle ); 227 #endif 228 229 } 230 else // block and deschedule policy 231 { 232 … … 189 234 cycle = (uint32_t)hal_get_cycles(); 190 235 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 191 printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc[%d] / cycle %d\n", 192 __FUNCTION__, this->trdid, this->process->pid, rpc->index , cycle ); 193 #endif 194 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 195 sched_yield("blocked on RPC"); 236 printk("\n[DBG] %s : thread %x in process %x blocks & deschedules for rpc %s / cycle %d\n", 237 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle ); 238 #endif 239 240 // block client thread 241 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 242 243 // deschedule 244 sched_yield("blocked on RPC"); 196 245 197 246 #if DEBUG_RPC_CLIENT_GENERIC 198 247 cycle = (uint32_t)hal_get_cycles(); 199 248 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 200 printk("\n[DBG] %s : thread %x in process %x resumes for rpc [%d]/ cycle %d\n",201 __FUNCTION__, this->trdid, this->process->pid, rpc ->index, cycle );249 printk("\n[DBG] %s : thread %x in process %x resumes for rpc %s / cycle %d\n", 250 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle ); 202 251 #endif 203 252 } 204 253 205 // check response available 206 assert( (rpc->responses == 0) , "illegal RPC response\n" ); 254 255 // response must be available for a blocking RPC 256 assert( (rpc->responses == 0) , "illegal response for RPC %s\n",
rpc_str[rpc->index] ); 256 207 257 } 208 else // non blocking RPC258 else // non blocking RPC 209 259 { 210 260 … … 212 262 cycle = (uint32_t)hal_get_cycles(); 213 263 if( DEBUG_RPC_CLIENT_GENERIC < cycle ) 214 printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc [%d]/ cycle %d\n",215 __FUNCTION__, this->trdid, this->process->pid, rpc ->index, cycle );264 printk("\n[DBG] %s : thread %x in process %x returns for non blocking rpc %s / cycle %d\n", 265 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], cycle ); 216 266 #endif 217 267 … … 224 274 /***************************************************************************************/ 225 275 226 //////////////// 227 void rpc_check( void ) 228 { 229 error_t error; 230 thread_t * thread; 231 uint32_t sr_save; 232 233 #if DEBUG_RPC_SERVER_GENERIC 234 uint32_t cycle; 235 #endif 236 237 bool_t found = false; 238 thread_t * this = CURRENT_THREAD; 239 core_t * core = this->core; 240 scheduler_t * sched = &core->scheduler; 241 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid]; 242 243 // interrupted thread not preemptable during RPC chek 244 hal_disable_irq( &sr_save ); 245 246 // activate (or create) RPC thread if RPC FIFO not empty and no acive RPC thread 247 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 248 { 249 250 #if DEBUG_RPC_SERVER_GENERIC 251 cycle = (uint32_t)hal_get_cycles(); 252 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 253 printk("\n[DBG] %s : RPC FIFO non empty for core[%x,%d] / cycle %d\n", 254 __FUNCTION__, local_cxy, core->lid, cycle ); 255 #endif 256 257 // search one IDLE RPC thread associated to the selected core 258 list_entry_t * iter; 259 LIST_FOREACH( &sched->k_root , iter ) 260 { 261 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 262 if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) ) 263 { 264 // unblock found RPC thread 265 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE ); 266 267 // exit loop 268 found = true; 269 break; 270 } 271 } 272 273 // create new RPC thread for the selected core if not found 274 if( found == false ) 275 { 276 error = thread_kernel_create( &thread, 277 THREAD_RPC, 278 &rpc_thread_func, 279 NULL, 280 core->lid ); 281 282 assert( (error == 0), 283 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 284 285 // unblock created RPC thread 286 thread->blocked = 0; 287 288 // update RRPC threads counter 289 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 ); 290 291 #if DEBUG_RPC_SERVER_GENERIC 292 cycle = (uint32_t)hal_get_cycles(); 293 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 294 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n", 295 __FUNCTION__, thread, local_cxy, core->lid, cycle ); 296 #endif 297 } 298 } 299 300 #if DEBUG_RPC_SERVER_GENERIC 301 cycle = (uint32_t)hal_get_cycles(); 302 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 303 printk("\n[DBG] %s : interrupted thread %x deschedules on core[%x,%d] / cycle %d\n", 304 __FUNCTION__, this, local_cxy, core->lid, cycle ); 305 #endif 306 307 // interrupted thread always deschedule 308 sched_yield("IPI received"); 309 310 #if DEBUG_RPC_SERVER_GENERIC 311 cycle = (uint32_t)hal_get_cycles(); 312 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 313 printk("\n[DBG] %s : interrupted thread %x resumes on core[%x,%d] / cycle %d\n", 314 __FUNCTION__, this, local_cxy, core->lid, cycle ); 315 #endif 316 317 // interrupted thread restore IRQs after resume 318 hal_restore_irq( sr_save ); 319 320 } // end 
rpc_check() 321 322 323 ////////////////////// 276 //////////////////////////// 324 277 void rpc_thread_func( void ) 325 278 { … … 345 298 rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 346 299 347 // two embedded loops: 348 // - external loop : "infinite" RPC thread 349 // - internal loop : handle one RPC request per iteration 350 351 while(1) // infinite loop 300 // "infinite" RPC thread loop 301 while(1) 352 302 { 353 303 // try to take RPC_FIFO ownership 354 if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 304 if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 355 305 { 356 306 … … 358 308 uint32_t cycle = (uint32_t)hal_get_cycles(); 359 309 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 360 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n", 361 __FUNCTION__, server_ptr, local_cxy, cycle ); 362 #endif 363 while( 1 ) // one RPC request per iteration 310 printk("\n[DBG] %s : RPC thread %x on core[%d] takes RPC_FIFO ownership / cycle %d\n", 311 __FUNCTION__, server_ptr->trdid, server_core_lid, cycle ); 312 #endif 313 // try to consume one RPC request 314 empty = remote_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 315 316 // release RPC_FIFO ownership 317 rpc_fifo->owner = 0; 318 319 // handle RPC request if success 320 if ( empty == 0 ) 364 321 { 365 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 366 367 // exit when FIFO empty or FIFO ownership lost (in case of descheduling) 368 if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) ) 322 // get client cluster and pointer on RPC descriptor 323 desc_cxy = GET_CXY( desc_xp ); 324 desc_ptr = GET_PTR( desc_xp ); 325 326 index = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) ); 327 blocking = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->blocking ) ); 328 329 #if DEBUG_RPC_SERVER_GENERIC 330 cycle = (uint32_t)hal_get_cycles(); 331 uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) ); 332 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 333 printk("\n[DBG] %s : RPC thread %x got rpc %s / client_cxy %x / items %d / cycle %d\n", 334 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle ); 335 #endif 336 // call the relevant server function 337 rpc_server[index]( desc_xp ); 338 339 #if DEBUG_RPC_SERVER_GENERIC 340 cycle = (uint32_t)hal_get_cycles(); 341 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 342 printk("\n[DBG] %s : RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n", 343 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle ); 344 #endif 345 // decrement response counter in RPC descriptor if blocking RPC 346 if( blocking ) 369 347 { 370 // get client cluster and pointer on RPC descriptor 371 desc_cxy = GET_CXY( desc_xp ); 372 desc_ptr = GET_PTR( desc_xp ); 373 374 index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) ); 375 blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) ); 348 // decrement responses counter in RPC descriptor 349 hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 ); 350 351 // get client thread pointer and client core lid from RPC descriptor 352 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 353 client_core_lid = hal_remote_l32 ( XPTR( desc_cxy , &desc_ptr->lid ) ); 354 355 // unblock client thread 356 thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC ); 357 358 hal_fence(); 376 359 377 360 #if DEBUG_RPC_SERVER_GENERIC 378 361 cycle = (uint32_t)hal_get_cycles(); 379 362 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 380 
printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n", 381 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr ); 382 #endif 383 // call the relevant server function 384 rpc_server[index]( desc_xp ); 385 386 #if DEBUG_RPC_SERVER_GENERIC 387 cycle = (uint32_t)hal_get_cycles(); 388 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 389 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n", 390 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle ); 391 #endif 392 // decrement response counter in RPC descriptor if blocking 393 if( blocking ) 394 { 395 // decrement responses counter in RPC descriptor 396 hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 ); 397 398 // get client thread pointer and client core lid from RPC descriptor 399 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 400 client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 401 402 // unblock client thread 403 thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC ); 404 405 hal_fence(); 406 407 #if DEBUG_RPC_SERVER_GENERIC 408 cycle = (uint32_t)hal_get_cycles(); 409 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 410 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n", 411 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle ); 412 #endif 413 // send IPI to client core 414 // dev_pic_send_ipi( desc_cxy , client_core_lid ); 415 } 416 } 417 else 418 { 419 break; 420 } 421 } // end internal loop 422 423 // release rpc_fifo ownership if not lost 424 if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0; 425 426 } // end if RPC fifo 427 428 // RPC thread blocks on IDLE 429 thread_block( server_xp , THREAD_BLOCKED_IDLE ); 430 431 // sucide if too many RPC threads / simply deschedule otherwise 363 printk("\n[DBG] %s : RPC thread %x unblocked client thread %x / cycle %d\n", 364 __FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle ); 365 #endif 366 // send IPI to client core 367 dev_pic_send_ipi( desc_cxy , client_core_lid ); 368 369 } // end if blocking RPC 370 } // end RPC handling if fifo non empty 371 } // end if RPC_fIFO ownership successfully taken and released 372 373 // sucide if too many RPC threads 432 374 if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX ) 433 375 { … … 436 378 uint32_t cycle = (uint32_t)hal_get_cycles(); 437 379 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 438 printk("\n[DBG] %s : RPC thread %x in cluster %xsuicides / cycle %d\n",439 __FUNCTION__, server_ptr , local_cxy, cycle );380 printk("\n[DBG] %s : RPC thread %x suicides / cycle %d\n", 381 __FUNCTION__, server_ptr->trdid, cycle ); 440 382 #endif 441 383 // update RPC threads counter 442 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );384 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[server_core_lid] , -1 ); 443 385 444 386 // RPC thread blocks on GLOBAL … … 448 390 hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE ); 449 391 } 392 // block and deschedule otherwise 450 393 else 451 394 { … … 454 397 uint32_t cycle = (uint32_t)hal_get_cycles(); 455 398 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 456 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n", 457 __FUNCTION__, server_ptr, local_cxy, cycle ); 458 #endif 399 printk("\n[DBG] %s : RPC thread %x block IDLE & deschedules / cycle %d\n", 400 __FUNCTION__, server_ptr->trdid, cycle ); 401 #endif 402 // RPC thread blocks on IDLE 403 thread_block( 
server_xp , THREAD_BLOCKED_IDLE ); 459 404 460 405 // RPC thread deschedules 461 assert( thread_can_yield() , "illegal sched_yield\n" ); 462 sched_yield("RPC fifo empty"); 406 sched_yield("RPC_FIFO empty"); 463 407 } 464 465 408 } // end infinite loop 466 467 409 } // end rpc_thread_func() 468 410 … … 478 420 { 479 421 #if DEBUG_RPC_PMEM_GET_PAGES 422 thread_t * this = CURRENT_THREAD; 480 423 uint32_t cycle = (uint32_t)hal_get_cycles(); 481 424 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 482 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",483 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );425 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 426 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 484 427 #endif 485 428 … … 504 447 cycle = (uint32_t)hal_get_cycles(); 505 448 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 506 printk("\n[DBG] %s : thread %x exit / cycle %d\n",507 __FUNCTION__ , CURRENT_THREAD, cycle );449 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 450 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 508 451 #endif 509 452 } … … 513 456 { 514 457 #if DEBUG_RPC_PMEM_GET_PAGES 458 thread_t * this = CURRENT_THREAD; 515 459 uint32_t cycle = (uint32_t)hal_get_cycles(); 516 460 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 517 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",518 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );461 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 462 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 519 463 #endif 520 464 … … 524 468 525 469 // get input arguments from client RPC descriptor 526 uint32_t order = (uint32_t)hal_remote_l wd( XPTR( cxy , &desc->args[0] ) );470 uint32_t order = (uint32_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 527 471 528 472 // call local pmem allocator … … 530 474 531 475 // set output arguments into client RPC descriptor 532 hal_remote_s wd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );476 hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 533 477 534 478 #if DEBUG_RPC_PMEM_GET_PAGES 535 479 cycle = (uint32_t)hal_get_cycles(); 536 480 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 537 printk("\n[DBG] %s : thread %x exit / cycle %d\n",538 __FUNCTION__ , CURRENT_THREAD, cycle );481 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 482 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 539 483 #endif 540 484 } … … 549 493 { 550 494 #if DEBUG_RPC_PMEM_RELEASE_PAGES 495 thread_t * this = CURRENT_THREAD; 551 496 uint32_t cycle = (uint32_t)hal_get_cycles(); 552 497 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 553 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",554 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );498 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 499 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 555 500 #endif 556 501 … … 572 517 cycle = (uint32_t)hal_get_cycles(); 573 518 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 574 printk("\n[DBG] %s : thread %x exit / cycle %d\n",575 __FUNCTION__ , CURRENT_THREAD, cycle );519 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 520 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 576 521 #endif 577 522 } … … 581 526 { 
582 527 #if DEBUG_RPC_PMEM_RELEASE_PAGES 528 thread_t * this = CURRENT_THREAD; 583 529 uint32_t cycle = (uint32_t)hal_get_cycles(); 584 530 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 585 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",586 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );531 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 532 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 587 533 #endif 588 534 … 592 538 593 539 // get input arguments from client RPC descriptor 594 page_t * page = (page_t *)(intptr_t)hal_remote_l wd( XPTR( cxy , &desc->args[0] ) );540 page_t * page = (page_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 595 541 596 542 // release memory to local pmem … 603 549 cycle = (uint32_t)hal_get_cycles(); 604 550 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 605 printk("\n[DBG] %s : thread %x exit / cycle %d\n",606 __FUNCTION__ , CURRENT_THREAD, cycle );551 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 552 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 607 553 #endif 608 554 } … 625 571 { 626 572 #if DEBUG_RPC_PROCESS_MAKE_FORK 573 thread_t * this = CURRENT_THREAD; 627 574 uint32_t cycle = (uint32_t)hal_get_cycles(); 628 575 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 629 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",630 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );576 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 577 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 631 578 #endif 632 579 … 654 601 cycle = (uint32_t)hal_get_cycles(); 655 602 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 656 printk("\n[DBG] %s : thread %x exit / cycle %d\n",657 __FUNCTION__ , CURRENT_THREAD, cycle );603 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 604 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 658 605 #endif 659 606 } … 663 610 { 664 611 #if DEBUG_RPC_PROCESS_MAKE_FORK 612 thread_t * this = CURRENT_THREAD; 665 613 uint32_t cycle = (uint32_t)hal_get_cycles(); 666 614 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 667 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",668 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );615 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 616 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 669 617 #endif 670 618 … 680 628 681 629 // get input arguments from client RPC descriptor 682 ref_process_xp = (xptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );630 ref_process_xp = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 683 parent_thread_xp = (xptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );631 parent_thread_xp = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 684 632 685 633 // call local kernel function … 690 638 691 639 // set output argument into client RPC descriptor 692 hal_remote_s wd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid );640 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid ); 693 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr );641 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , 
(uint64_t)(intptr_t)child_thread_ptr ); 642 hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 695 643 696 644 #if DEBUG_RPC_PROCESS_MAKE_FORK 697 645 cycle = (uint32_t)hal_get_cycles(); 698 646 if( cycle > DEBUG_RPC_PROCESS_MAKE_FORK ) 699 printk("\n[DBG] %s : thread %x exit / cycle %d\n",700 __FUNCTION__ , CURRENT_THREAD, cycle );647 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 648 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 701 649 #endif 702 650 } … … 723 671 error_t * error ) // out 724 672 { 673 #if DEBUG_RPC_THREAD_USER_CREATE 674 thread_t * this = CURRENT_THREAD; 675 uint32_t cycle = (uint32_t)hal_get_cycles(); 676 if( cycle > DEBUG_RPC_THREAD_USER_CREATE) 677 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 678 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 679 #endif 680 725 681 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 726 682 … … 744 700 *error = (error_t)rpc.args[5]; 745 701 702 #if DEBUG_RPC_THREAD_USER_CREATE 703 cycle = (uint32_t)hal_get_cycles(); 704 if( cycle > DEBUG_RPC_THREAD_USER_CREATE) 705 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 706 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 707 #endif 746 708 } 747 709 … … 749 711 void rpc_thread_user_create_server( xptr_t xp ) 750 712 { 713 #if DEBUG_RPC_THREAD_USER_CREATE 714 thread_t * this = CURRENT_THREAD; 715 uint32_t cycle = (uint32_t)hal_get_cycles(); 716 if( cycle > DEBUG_RPC_THREAD_USER_CREATE) 717 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 718 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 719 #endif 751 720 752 721 pthread_attr_t * attr_ptr; // pointer on attributes structure in client cluster … … 767 736 768 737 // get input arguments from RPC descriptor 769 pid = (pid_t) hal_remote_l wd(XPTR(client_cxy , &desc->args[0]));770 start_func = (void *)(intptr_t) hal_remote_l wd(XPTR(client_cxy , &desc->args[1]));771 start_arg = (void *)(intptr_t) hal_remote_l wd(XPTR(client_cxy , &desc->args[2]));772 attr_ptr = (pthread_attr_t *)(intptr_t)hal_remote_l wd(XPTR(client_cxy , &desc->args[3]));738 pid = (pid_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 739 start_func = (void *)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 740 start_arg = (void *)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 741 attr_ptr = (pthread_attr_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[3])); 773 742 774 743 // makes a local copy of attributes structure … … 786 755 // set output arguments 787 756 thread_xp = XPTR( local_cxy , thread_ptr ); 788 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp ); 789 hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 790 757 hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp ); 758 hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 759 760 #if DEBUG_RPC_THREAD_USER_CREATE 761 cycle = (uint32_t)hal_get_cycles(); 762 if( cycle > DEBUG_RPC_THREAD_USER_CREATE) 763 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 764 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 765 #endif 791 766 } 792 767 … … 803 778 error_t * error ) // out 804 779 { 780 #if DEBUG_RPC_THREAD_KERNEL_CREATE 781 thread_t * this = CURRENT_THREAD; 782 uint32_t cycle = 
(uint32_t)hal_get_cycles(); 783 if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE) 784 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 785 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 786 #endif 787 805 788 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 806 789 … … 823 806 *error = (error_t)rpc.args[4]; 824 807 808 #if DEBUG_RPC_THREAD_KERNEL_CREATE 809 cycle = (uint32_t)hal_get_cycles(); 810 if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE) 811 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 812 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 813 #endif 825 814 } 826 815 … … 828 817 void rpc_thread_kernel_create_server( xptr_t xp ) 829 818 { 819 #if DEBUG_RPC_THREAD_KERNEL_CREATE 820 thread_t * this = CURRENT_THREAD; 821 uint32_t cycle = (uint32_t)hal_get_cycles(); 822 if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE) 823 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 824 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 825 #endif 826 830 827 thread_t * thread_ptr; // local pointer on thread descriptor 831 828 xptr_t thread_xp; // extended pointer on thread descriptor … … 838 835 839 836 // get attributes from RPC descriptor 840 uint32_t type = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );841 void * func = (void*)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );842 void * args = (void*)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[2] ) );837 uint32_t type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 838 void * func = (void*)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 839 void * args = (void*)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 843 840 844 841 // select one core … … 850 847 // set output arguments 851 848 thread_xp = XPTR( local_cxy , thread_ptr ); 852 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 853 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp ); 854 849 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 850 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp ); 851 852 #if DEBUG_RPC_THREAD_KERNEL_CREATE 853 cycle = (uint32_t)hal_get_cycles(); 854 if( cycle > DEBUG_RPC_THREAD_KERNEL_CREATE) 855 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 856 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 857 #endif 855 858 } 856 859 … … 913 916 914 917 // get arguments from RPC descriptor 915 action = (uint32_t)hal_remote_l wd( XPTR(client_cxy , &rpc->args[0]) );916 pid = (pid_t) hal_remote_l wd( XPTR(client_cxy , &rpc->args[1]) );918 action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &rpc->args[0]) ); 919 pid = (pid_t) hal_remote_l64( XPTR(client_cxy , &rpc->args[1]) ); 917 920 918 921 #if DEBUG_RPC_PROCESS_SIGACTION … … 945 948 { 946 949 // get client core lid 947 client_lid = (lid_t) hal_remote_l w( XPTR( client_cxy , &rpc->lid ) );950 client_lid = (lid_t) hal_remote_l32 ( XPTR( client_cxy , &rpc->lid ) ); 948 951 949 952 // unblock client thread … … 981 984 { 982 985 #if DEBUG_RPC_VFS_INODE_CREATE 986 thread_t * this = CURRENT_THREAD; 983 987 uint32_t cycle = (uint32_t)hal_get_cycles(); 984 988 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 985 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",986 __FUNCTION__ , CURRENT_THREAD , local_cxy, 
CURRENT_THREAD->core->lid , cycle );989 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 990 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 987 991 #endif 988 992 … … 1013 1017 1014 1018 #if DEBUG_RPC_VFS_INODE_CREATE 1015 uint32_tcycle = (uint32_t)hal_get_cycles();1019 cycle = (uint32_t)hal_get_cycles(); 1016 1020 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1017 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1018 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1021 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1022 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1019 1023 #endif 1020 1024 } … … 1024 1028 { 1025 1029 #if DEBUG_RPC_VFS_INODE_CREATE 1030 thread_t * this = CURRENT_THREAD; 1026 1031 uint32_t cycle = (uint32_t)hal_get_cycles(); 1027 1032 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1028 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1029 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1033 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1034 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1030 1035 #endif 1031 1036 … … 1046 1051 1047 1052 // get input arguments from client rpc descriptor 1048 dentry_xp = (xptr_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1049 fs_type = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1050 inode_type = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[2] ) );1051 extend = (void *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[3] ) );1052 attr = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[4] ) );1053 rights = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[5] ) );1054 uid = (uid_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[6] ) );1055 gid = (gid_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[7] ) );1053 dentry_xp = (xptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1054 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1055 inode_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1056 extend = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1057 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1058 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); 1059 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[6] ) ); 1060 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[7] ) ); 1056 1061 1057 1062 // call local kernel function … … 1067 1072 1068 1073 // set output arguments 1069 hal_remote_s wd( XPTR( client_cxy , &desc->args[8] ) , (uint64_t)inode_xp );1070 hal_remote_s wd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );1074 hal_remote_s64( XPTR( client_cxy , &desc->args[8] ) , (uint64_t)inode_xp ); 1075 hal_remote_s64( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error ); 1071 1076 1072 1077 #if DEBUG_RPC_VFS_INODE_CREATE 1073 uint32_tcycle = (uint32_t)hal_get_cycles();1078 cycle = (uint32_t)hal_get_cycles(); 1074 1079 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1075 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1076 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1080 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1081 __FUNCTION__, this->trdid, this->process->pid, 
this->core->lid, cycle ); 1077 1082 #endif 1078 1083 } … … 1088 1093 { 1089 1094 #if DEBUG_RPC_VFS_INODE_DESTROY 1095 thread_t * this = CURRENT_THREAD; 1090 1096 uint32_t cycle = (uint32_t)hal_get_cycles(); 1091 1097 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1092 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1093 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1098 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1099 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1094 1100 #endif 1095 1101 … … 1112 1118 1113 1119 #if DEBUG_RPC_VFS_INODE_DESTROY 1114 uint32_tcycle = (uint32_t)hal_get_cycles();1120 cycle = (uint32_t)hal_get_cycles(); 1115 1121 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1116 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1117 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1122 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1123 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1118 1124 #endif 1119 1125 } … … 1123 1129 { 1124 1130 #if DEBUG_RPC_VFS_INODE_DESTROY 1131 thread_t * this = CURRENT_THREAD; 1125 1132 uint32_t cycle = (uint32_t)hal_get_cycles(); 1126 1133 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1127 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1128 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1134 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1135 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1129 1136 #endif 1130 1137 … … 1137 1144 1138 1145 // get arguments "inode" from client RPC descriptor 1139 inode = (vfs_inode_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1146 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1140 1147 1141 1148 // call local kernel function … … 1143 1150 1144 1151 // set output argument 1145 hal_remote_s wd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );1152 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 1146 1153 1147 1154 #if DEBUG_RPC_VFS_INODE_DESTROY 1148 uint32_tcycle = (uint32_t)hal_get_cycles();1155 cycle = (uint32_t)hal_get_cycles(); 1149 1156 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1150 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1151 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1157 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1158 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1152 1159 #endif 1153 1160 } … … 1166 1173 { 1167 1174 #if DEBUG_RPC_VFS_DENTRY_CREATE 1175 thread_t * this = CURRENT_THREAD; 1168 1176 uint32_t cycle = (uint32_t)hal_get_cycles(); 1169 1177 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1170 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1171 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid, cycle );1178 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1179 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1172 1180 #endif 1173 1181 … … 1195 1203 cycle = (uint32_t)hal_get_cycles(); 1196 1204 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1197 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1198 __FUNCTION__ , CURRENT_THREAD, cycle );1205 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 
1206 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1199 1207 #endif 1200 1208 } … … 1204 1212 { 1205 1213 #if DEBUG_RPC_VFS_DENTRY_CREATE 1214 thread_t * this = CURRENT_THREAD; 1206 1215 uint32_t cycle = (uint32_t)hal_get_cycles(); 1207 1216 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1208 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1209 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1217 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1218 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1210 1219 #endif 1211 1220 … … 1222 1231 1223 1232 // get arguments "name", "type", and "parent" from client RPC descriptor 1224 type = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1225 name = (char *)(intptr_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1226 parent = (vfs_inode_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[2] ) );1233 type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1234 name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1235 parent = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1227 1236 1228 1237 // makes a local copy of name … … 1236 1245 &dentry_xp ); 1237 1246 // set output arguments 1238 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)dentry_xp );1239 hal_remote_s wd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );1247 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)dentry_xp ); 1248 hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1240 1249 1241 1250 #if DEBUG_RPC_VFS_DENTRY_CREATE 1242 1251 cycle = (uint32_t)hal_get_cycles(); 1243 1252 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1244 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1245 __FUNCTION__ , CURRENT_THREAD, cycle );1253 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1254 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1246 1255 #endif 1247 1256 } … … 1257 1266 { 1258 1267 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1268 thread_t * this = CURRENT_THREAD; 1259 1269 uint32_t cycle = (uint32_t)hal_get_cycles(); 1260 1270 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1261 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1262 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1271 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1272 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1263 1273 #endif 1264 1274 … … 1283 1293 cycle = (uint32_t)hal_get_cycles(); 1284 1294 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1285 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1286 __FUNCTION__ , CURRENT_THREAD, cycle );1295 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1296 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1287 1297 #endif 1288 1298 } … … 1292 1302 { 1293 1303 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1304 thread_t * this = CURRENT_THREAD; 1294 1305 uint32_t cycle = (uint32_t)hal_get_cycles(); 1295 1306 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1296 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1297 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1307 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1308 __FUNCTION__, this->trdid, 
this->process->pid, this->core->lid , cycle ); 1298 1309 #endif 1299 1310 … … 1306 1317 1307 1318 // get arguments "dentry" from client RPC descriptor 1308 dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1319 dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1309 1320 1310 1321 // call local kernel function … … 1312 1323 1313 1324 // set output argument 1314 hal_remote_s wd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );1325 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 1315 1326 1316 1327 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1317 1328 cycle = (uint32_t)hal_get_cycles(); 1318 1329 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1319 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1320 __FUNCTION__ , CURRENT_THREAD, cycle );1330 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1331 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1321 1332 #endif 1322 1333 } … … 1335 1346 { 1336 1347 #if DEBUG_RPC_VFS_FILE_CREATE 1348 thread_t * this = CURRENT_THREAD; 1337 1349 uint32_t cycle = (uint32_t)hal_get_cycles(); 1338 1350 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1339 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1340 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1351 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1352 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1341 1353 #endif 1342 1354 … … 1363 1375 cycle = (uint32_t)hal_get_cycles(); 1364 1376 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1365 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1366 __FUNCTION__ , CURRENT_THREAD, cycle );1377 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1378 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1367 1379 #endif 1368 1380 } … … 1372 1384 { 1373 1385 #if DEBUG_RPC_VFS_FILE_CREATE 1386 thread_t * this = CURRENT_THREAD; 1374 1387 uint32_t cycle = (uint32_t)hal_get_cycles(); 1375 1388 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1376 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1377 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1389 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1390 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1378 1391 #endif 1379 1392 … … 1388 1401 1389 1402 // get arguments "file_attr" and "inode" from client RPC descriptor 1390 inode = (vfs_inode_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1391 file_attr = (uint32_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1403 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1404 file_attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1392 1405 1393 1406 // call local kernel function … … 1397 1410 1398 1411 // set output arguments 1399 hal_remote_s wd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp );1400 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );1412 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp ); 1413 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1401 1414 1402 1415 #if DEBUG_RPC_VFS_FILE_CREATE 1403 1416 cycle = (uint32_t)hal_get_cycles(); 1404 1417 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1405 printk("\n[DBG] %s : thread %x exit / 
cycle %d\n",1406 __FUNCTION__ , CURRENT_THREAD, cycle );1418 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1419 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1407 1420 #endif 1408 1421 } … … 1417 1430 { 1418 1431 #if DEBUG_RPC_VFS_FILE_DESTROY 1432 thread_t * this = CURRENT_THREAD; 1419 1433 uint32_t cycle = (uint32_t)hal_get_cycles(); 1420 1434 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1421 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1422 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1435 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1436 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1423 1437 #endif 1424 1438 … … 1440 1454 cycle = (uint32_t)hal_get_cycles(); 1441 1455 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1442 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1443 __FUNCTION__ , CURRENT_THREAD, cycle );1456 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1457 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1444 1458 #endif 1445 1459 } … … 1449 1463 { 1450 1464 #if DEBUG_RPC_VFS_FILE_DESTROY 1465 thread_t * this = CURRENT_THREAD; 1451 1466 uint32_t cycle = (uint32_t)hal_get_cycles(); 1452 1467 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1453 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1454 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1468 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1469 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1455 1470 #endif 1456 1471 … … 1462 1477 1463 1478 // get arguments "dentry" from client RPC descriptor 1464 file = (vfs_file_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1479 file = (vfs_file_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1465 1480 1466 1481 // call local kernel function … … 1470 1485 cycle = (uint32_t)hal_get_cycles(); 1471 1486 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1472 printk("\n[DBG] %s : thread %x exit / cycle %d\n",1473 __FUNCTION__ , CURRENT_THREAD, cycle );1487 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1488 __FUNCTION__, this->trdid, this->process->pid, this->core->lid, cycle ); 1474 1489 #endif 1475 1490 } … … 1522 1537 1523 1538 // get arguments "parent", "name", and "child_xp" 1524 parent = (vfs_inode_t*)(intptr_t)hal_remote_l wd(XPTR(client_cxy , &desc->args[0]));1525 name = (char*)(intptr_t) hal_remote_l wd(XPTR(client_cxy , &desc->args[1]));1526 child_xp = (xptr_t) hal_remote_l wd(XPTR(client_cxy , &desc->args[2]));1539 parent = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1540 name = (char*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1541 child_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 1527 1542 1528 1543 // get name local copy … … 1534 1549 1535 1550 // set output argument 1536 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );1551 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1537 1552 1538 1553 } … … 1577 1592 1578 1593 // get arguments "parent", "name", and "child_xp" 1579 inode = (vfs_inode_t*)(intptr_t)hal_remote_l wd(XPTR(client_cxy , &desc->args[0]));1594 inode = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1580 1595 1581 1596 // call the kernel function … … 
1583 1598 1584 1599 // set output argument 1585 hal_remote_s wd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );1600 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 1586 1601 1587 1602 } … … 1636 1651 // get input arguments 1637 1652 mapper = (mapper_t *)(intptr_t)hal_remote_lpt( XPTR( client_cxy , &desc->args[0] ) ); 1638 first = (uint32_t) hal_remote_l w( XPTR( client_cxy , &desc->args[1] ) );1639 index = (uint32_t) hal_remote_l w( XPTR( client_cxy , &desc->args[2] ) );1653 first = (uint32_t) hal_remote_l32 ( XPTR( client_cxy , &desc->args[1] ) ); 1654 index = (uint32_t) hal_remote_l32 ( XPTR( client_cxy , &desc->args[2] ) ); 1640 1655 1641 1656 // call the kernel function … … 1643 1658 1644 1659 // set output argument 1645 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)cluster );1646 hal_remote_s wd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );1660 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)cluster ); 1661 hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1647 1662 1648 1663 } … … 1660 1675 { 1661 1676 #if DEBUG_RPC_VMM_GET_VSEG 1677 thread_t * this = CURRENT_THREAD; 1662 1678 uint32_t cycle = (uint32_t)hal_get_cycles(); 1663 1679 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1664 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1665 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1680 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1681 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1666 1682 #endif 1667 1683 … … 1688 1704 cycle = (uint32_t)hal_get_cycles(); 1689 1705 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1690 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1691 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1706 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1707 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1692 1708 #endif 1693 1709 } … … 1697 1713 { 1698 1714 #if DEBUG_RPC_VMM_GET_VSEG 1715 thread_t * this = CURRENT_THREAD; 1699 1716 uint32_t cycle = (uint32_t)hal_get_cycles(); 1700 1717 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1701 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1702 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1718 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1719 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1703 1720 #endif 1704 1721 … … 1714 1731 1715 1732 // get input argument from client RPC descriptor 1716 process = (process_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1717 vaddr = (intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1733 process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1734 vaddr = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1718 1735 1719 1736 // call local kernel function … … 1722 1739 // set output arguments to client RPC descriptor 1723 1740 vseg_xp = XPTR( local_cxy , vseg_ptr ); 1724 hal_remote_s wd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp );1725 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );1741 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp ); 1742 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1726 1743 1727 1744 #if DEBUG_RPC_VMM_GET_VSEG 
1728 1745 cycle = (uint32_t)hal_get_cycles(); 1729 1746 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1730 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1731 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1747 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1748 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1732 1749 #endif 1733 1750 } … … 1748 1765 { 1749 1766 #if DEBUG_RPC_VMM_GET_PTE 1767 thread_t * this = CURRENT_THREAD; 1750 1768 uint32_t cycle = (uint32_t)hal_get_cycles(); 1751 1769 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1752 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1753 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1770 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1771 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1754 1772 #endif 1755 1773 … … 1778 1796 cycle = (uint32_t)hal_get_cycles(); 1779 1797 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1780 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1781 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1798 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1799 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1782 1800 #endif 1783 1801 } … … 1787 1805 { 1788 1806 #if DEBUG_RPC_VMM_GET_PTE 1807 thread_t * this = CURRENT_THREAD; 1789 1808 uint32_t cycle = (uint32_t)hal_get_cycles(); 1790 1809 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1791 printk("\n[DBG] %s : thread %x enter on core[%x,%d]/ cycle %d\n",1792 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1810 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1811 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1793 1812 #endif 1794 1813 … … 1805 1824 1806 1825 // get input argument "process" & "vpn" from client RPC descriptor 1807 process = (process_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1808 vpn = (vpn_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1809 cow = (bool_t) hal_remote_l wd( XPTR( client_cxy , &desc->args[2] ) );1826 process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1827 vpn = (vpn_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1828 cow = (bool_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1810 1829 1811 1830 // call local kernel function … … 1813 1832 1814 1833 // set output argument "attr" & "ppn" to client RPC descriptor 1815 hal_remote_s wd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );1816 hal_remote_s wd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );1817 hal_remote_s wd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );1834 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr ); 1835 hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn ); 1836 hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 1818 1837 1819 1838 #if DEBUG_RPC_VMM_GET_PTE 1820 1839 cycle = (uint32_t)hal_get_cycles(); 1821 1840 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1822 printk("\n[DBG] %s : thread %x exit on core[%x,%d]/ cycle %d\n",1823 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );1841 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1842 __FUNCTION__, this->trdid, this->process->pid, this->core->lid 
, cycle ); 1824 1843 #endif 1825 1844 } … … 1834 1853 xptr_t * buf_xp ) // out 1835 1854 { 1855 #if DEBUG_RPC_KCM_ALLOC 1856 thread_t * this = CURRENT_THREAD; 1857 uint32_t cycle = (uint32_t)hal_get_cycles(); 1858 if( cycle > DEBUG_RPC_KCM_ALLOC ) 1859 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1860 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1861 #endif 1862 1836 1863 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 1837 1864 … … 1851 1878 *buf_xp = (xptr_t)rpc.args[1]; 1852 1879 1880 #if DEBUG_RPC_KCM_ALLOC 1881 cycle = (uint32_t)hal_get_cycles(); 1882 if( cycle > DEBUG_RPC_KCM_ALLOC ) 1883 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1884 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1885 #endif 1853 1886 } 1854 1887 … … 1856 1889 void rpc_kcm_alloc_server( xptr_t xp ) 1857 1890 { 1891 #if DEBUG_RPC_KCM_ALLOC 1892 thread_t * this = CURRENT_THREAD; 1893 uint32_t cycle = (uint32_t)hal_get_cycles(); 1894 if( cycle > DEBUG_RPC_KCM_ALLOC ) 1895 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1896 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1897 #endif 1898 1858 1899 // get client cluster identifier and pointer on RPC descriptor 1859 1900 cxy_t client_cxy = GET_CXY( xp ); … … 1861 1902 1862 1903 // get input argument "kmem_type" from client RPC descriptor 1863 uint32_t kmem_type = (uint32_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1904 uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1864 1905 1865 1906 // allocates memory for kcm … … 1871 1912 // set output argument 1872 1913 xptr_t buf_xp = XPTR( local_cxy , buf_ptr ); 1873 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 1874 1914 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 1915 1916 #if DEBUG_RPC_KCM_ALLOC 1917 cycle = (uint32_t)hal_get_cycles(); 1918 if( cycle > DEBUG_RPC_KCM_ALLOC ) 1919 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1920 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1921 #endif 1875 1922 } 1876 1923 … … 1884 1931 uint32_t kmem_type ) // in 1885 1932 { 1933 #if DEBUG_RPC_KCM_FREE 1934 thread_t * this = CURRENT_THREAD; 1935 uint32_t cycle = (uint32_t)hal_get_cycles(); 1936 if( cycle > DEBUG_RPC_KCM_FREE ) 1937 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1938 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1939 #endif 1940 1886 1941 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 1887 1942 … … 1899 1954 rpc_send( cxy , &rpc ); 1900 1955 1956 #if DEBUG_RPC_KCM_FREE 1957 cycle = (uint32_t)hal_get_cycles(); 1958 if( cycle > DEBUG_RPC_KCM_FREE ) 1959 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1960 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1961 #endif 1901 1962 } 1902 1963 … … 1904 1965 void rpc_kcm_free_server( xptr_t xp ) 1905 1966 { 1967 #if DEBUG_RPC_KCM_FREE 1968 thread_t * this = CURRENT_THREAD; 1969 uint32_t cycle = (uint32_t)hal_get_cycles(); 1970 if( cycle > DEBUG_RPC_KCM_FREE ) 1971 printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n", 1972 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1973 #endif 1974 1906 1975 // get client cluster identifier and pointer on RPC descriptor 
1907 1976 cxy_t client_cxy = GET_CXY( xp ); … … 1909 1978 1910 1979 // get input arguments "buf" and "kmem_type" from client RPC descriptor 1911 void * buf = (void *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1912 uint32_t kmem_type = (uint32_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1980 void * buf = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1981 uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1913 1982 1914 1983 // releases memory … … 1918 1987 kmem_free( &req ); 1919 1988 1989 #if DEBUG_RPC_KCM_FREE 1990 cycle = (uint32_t)hal_get_cycles(); 1991 if( cycle > DEBUG_RPC_KCM_FREE ) 1992 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n", 1993 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle ); 1994 #endif 1920 1995 } 1921 1996 … … 1975 2050 1976 2051 // get arguments from client RPC descriptor 1977 mapper = (mapper_t *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[0] ) );1978 to_buffer = hal_remote_l wd( XPTR( client_cxy , &desc->args[1] ) );1979 is_user = hal_remote_l wd( XPTR( client_cxy , &desc->args[2] ) );1980 file_offset = hal_remote_l wd( XPTR( client_cxy , &desc->args[3] ) );1981 size = hal_remote_l wd( XPTR( client_cxy , &desc->args[5] ) );2052 mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2053 to_buffer = hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2054 is_user = hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 2055 file_offset = hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 2056 size = hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) ); 1982 2057 1983 2058 // call local kernel function 1984 2059 if( is_user ) 1985 2060 { 1986 user_buffer = (void *)(intptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[4] ) );2061 user_buffer = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1987 2062 1988 2063 error = mapper_move_user( mapper, … … 1994 2069 else 1995 2070 { 1996 kern_buffer = (xptr_t)hal_remote_l wd( XPTR( client_cxy , &desc->args[4] ) );2071 kern_buffer = (xptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1997 2072 1998 2073 error = mapper_move_kernel( mapper, … … 2004 2079 2005 2080 // set output argument to client RPC descriptor 2006 hal_remote_s wd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );2081 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 2007 2082 2008 2083 } … … 2046 2121 2047 2122 // get input arguments from client RPC descriptor 2048 mapper_t * mapper = (mapper_t *)(intptr_t)hal_remote_l wd( XPTR( cxy , &desc->args[0] ) );2049 uint32_t index = (uint32_t) hal_remote_l wd( XPTR( cxy , &desc->args[1] ) );2123 mapper_t * mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 2124 uint32_t index = (uint32_t) hal_remote_l64( XPTR( cxy , &desc->args[1] ) ); 2050 2125 2051 2126 // call local pmem allocator … … 2053 2128 2054 2129 // set output arguments into client RPC descriptor 2055 hal_remote_s wd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );2130 hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 2056 2131 2057 2132 } … … 2107 2182 2108 2183 // get input arguments from client RPC descriptor 2109 process_t * process = (process_t *)(intptr_t)hal_remote_l wd( XPTR(cxy , &desc->args[0]));2110 vseg_type_t type = (vseg_type_t)(uint32_t)hal_remote_l wd( XPTR(cxy , &desc->args[1]));2111 intptr_t base = (intptr_t) 
hal_remote_l wd( XPTR(cxy , &desc->args[2]));2112 uint32_t size = (uint32_t) hal_remote_l wd( XPTR(cxy , &desc->args[3]));2113 uint32_t file_offset = (uint32_t) hal_remote_l wd( XPTR(cxy , &desc->args[4]));2114 uint32_t file_size = (uint32_t) hal_remote_l wd( XPTR(cxy , &desc->args[5]));2115 xptr_t mapper_xp = (xptr_t) hal_remote_l wd( XPTR(cxy , &desc->args[6]));2116 cxy_t vseg_cxy = (cxy_t)(uint32_t) hal_remote_l wd( XPTR(cxy , &desc->args[7]));2184 process_t * process = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0])); 2185 vseg_type_t type = (vseg_type_t)(uint32_t)hal_remote_l64( XPTR(cxy , &desc->args[1])); 2186 intptr_t base = (intptr_t) hal_remote_l64( XPTR(cxy , &desc->args[2])); 2187 uint32_t size = (uint32_t) hal_remote_l64( XPTR(cxy , &desc->args[3])); 2188 uint32_t file_offset = (uint32_t) hal_remote_l64( XPTR(cxy , &desc->args[4])); 2189 uint32_t file_size = (uint32_t) hal_remote_l64( XPTR(cxy , &desc->args[5])); 2190 xptr_t mapper_xp = (xptr_t) hal_remote_l64( XPTR(cxy , &desc->args[6])); 2191 cxy_t vseg_cxy = (cxy_t)(uint32_t) hal_remote_l64( XPTR(cxy , &desc->args[7])); 2117 2192 2118 2193 // call local kernel function … … 2127 2202 2128 2203 // set output arguments into client RPC descriptor 2129 hal_remote_s wd( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );2204 hal_remote_s64( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg ); 2130 2205 2131 2206 } … … 2169 2244 2170 2245 // get input arguments from client RPC descriptor 2171 process = (process_t *)(intptr_t)hal_remote_l wd( XPTR(cxy , &desc->args[0]));2246 process = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0])); 2172 2247 2173 2248 // call local kernel function … … 2213 2288 2214 2289 // get input arguments from client RPC descriptor 2215 process = (process_t *)(intptr_t)hal_remote_l wd( XPTR(cxy , &desc->args[0]));2216 detailed = (bool_t) hal_remote_l wd( XPTR(cxy , &desc->args[1]));2290 process = (process_t *)(intptr_t)hal_remote_l64( XPTR(cxy , &desc->args[0])); 2291 detailed = (bool_t) hal_remote_l64( XPTR(cxy , &desc->args[1])); 2217 2292 2218 2293 // call local kernel function -
trunk/kernel/kern/rpc.h
r503 r564 29 29 #include <hal_atomic.h> 30 30 #include <bits.h> 31 #include <spinlock.h>32 31 #include <vseg.h> 33 32 #include <remote_fifo.h> … … 150 149 151 150 /*********************************************************************************** 152 * This function is the entry point for RPC handling on the server cluster. 153 * It is executed by the core receiving the IPI sent by the client thread. 154 * - If the RPC FIFO is empty, it deschedules. 155 * - If the RPC FIFO is not empty, it checks if it exist a non-blocked RPC thread 156 * in the cluster, creates a new one if required, and deschedule to allow 157 * the RPC thead to execute. 158 **********************************************************************************/ 159 void rpc_check( void ); 160 161 /*********************************************************************************** 162 * This function contains the loop to execute all pending RPCs on the server side. 163 * It is called by the rpc_thread_func() function with irq disabled, and after 164 * RPC_FIFO ownership acquisition. 165 *********************************************************************************** 166 * @ rpc_fifo : pointer on the local RPC fifo 167 **********************************************************************************/ 168 void rpc_execute_all( remote_fifo_t * rpc_fifo ); 169 170 /*********************************************************************************** 171 * This function contains the infinite loop executed by a RPC thread. 151 * This function contains the infinite loop executed by an RPC thread, 152 * to handle all pending RPCs registered in the RPC FIFO attached to a given core. 172 153 **********************************************************************************/ 173 154 void rpc_thread_func( void ); … … 177 158 **********************************************************************************/ 178 159 void __attribute__((noinline)) rpc_undefined( xptr_t xp __attribute__ ((unused)) ); 179 180 160 181 161
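With rpc_check() and rpc_execute_all() removed, rpc_thread_func() is the single server-side entry point. A plausible sketch of the loop shape it implements, reconstructed from the comment above and from sched_rpc_activate() in scheduler.c; remote_fifo_get_item(), the rpc_server[] dispatch table, and the desc->index field are assumptions about code not shown in this changeset:

// sketch of the infinite loop executed by an RPC thread (assumptions flagged above)
void rpc_thread_func( void )
{
    thread_t      * this = CURRENT_THREAD;
    remote_fifo_t * fifo = &LOCAL_CLUSTER->rpc_fifo[this->core->lid];
    uint64_t        item;

    while( 1 )
    {
        // consume all pending requests registered in the local RPC FIFO
        while( remote_fifo_get_item( fifo , &item ) == 0 )   // assumed primitive
        {
            xptr_t       desc_xp  = (xptr_t)item;
            cxy_t        desc_cxy = GET_CXY( desc_xp );
            rpc_desc_t * desc_ptr = GET_PTR( desc_xp );

            // get the requested RPC index, and call the relevant handler
            uint32_t index = hal_remote_l32( XPTR( desc_cxy , &desc_ptr->index ) );
            rpc_server[index]( desc_xp );                    // assumed dispatch table
        }

        // FIFO empty : block on IDLE and deschedule; sched_rpc_activate()
        // unblocks this thread when the FIFO becomes non empty again
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
        sched_yield( "RPC fifo empty" );
    }
}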
trunk/kernel/kern/scheduler.c
r551 r564 2 2 * scheduler.c - Core scheduler implementation. 3 3 * 4 * Author Alain Greiner (2016 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 36 36 37 37 /////////////////////////////////////////////////////////////////////////////////////////// 38 // Externglobal variables38 // global variables /////////////////////////////////////////////////////////////////////////////////////////// 39 39 40 40 41 uint32_t idle_thread_count; 42 uint32_t idle_thread_count_active; 43 44 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c file 45 extern uint32_t switch_save_sr[]; // allocated in kernel_init.c file 46 47 //////////////////////////////// 48 void sched_init( core_t * core ) 49 { 50 scheduler_t * sched = &core->scheduler; 51 52 sched->u_threads_nr = 0; 53 sched->k_threads_nr = 0; 54 55 sched->current = CURRENT_THREAD; 56 sched->idle = NULL; // initialized in kernel_init() 57 sched->u_last = NULL; // initialized in sched_register_thread() 58 sched->k_last = NULL; // initialized in sched_register_thread() 59 60 // initialise threads lists 61 list_root_init( &sched->u_root ); 62 list_root_init( &sched->k_root ); 63 64 // init spinlock 65 spinlock_init( &sched->lock ); 66 67 sched->req_ack_pending = false; // no pending request 68 sched->trace = false; // context switches trace desactivated 69 70 } // end sched_init() 71 72 //////////////////////////////////////////// 73 void sched_register_thread( core_t * core, 74 thread_t * thread ) 75 { 76 scheduler_t * sched = &core->scheduler; 77 thread_type_t type = thread->type; 78 79 // take lock protecting sheduler lists 80 uint32_t irq_state; 81 spinlock_lock_busy( &sched->lock, &irq_state ); 82 83 if( type == THREAD_USER ) 84 { 85 list_add_last( &sched->u_root , &thread->sched_list ); 86 sched->u_threads_nr++; 87 if( sched->u_last == NULL ) sched->u_last = &thread->sched_list; 88 } 89 else // kernel thread 90 { 91 list_add_last( &sched->k_root , &thread->sched_list ); 92 sched->k_threads_nr++; 93 if( sched->k_last == NULL ) sched->k_last = &thread->sched_list; 94 } 95 96 // release lock 97 hal_fence(); 98 spinlock_unlock_busy( &sched->lock, irq_state); 99 100 } // end sched_register_thread() 101 102 ////////////////////////////////////////////// 41 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 42 43 /////////////////////////////////////////////////////////////////////////////////////////// 44 // private functions 45 /////////////////////////////////////////////////////////////////////////////////////////// 46 47 48 //////////////////////////////////////////////////////////////////////////////////////////// 49 // This static function does NOT modify the scheduler state. 50 // It just selects a thread in the list of attached threads, implementing the following 51 // three-step policy: 52 // 1) It scans the list of kernel threads, from the next thread after the last executed one, 53 // and returns the first runnable found : not IDLE, not blocked, client queue not empty. 54 // It can be the current thread. 55 // 2) If no kernel thread found, it scans the list of user threads, from the next thread after 56 // the last executed one, and returns the first runnable found : not blocked. 57 // It can be the current thread. 58 // 3) If no runnable thread found, it returns the idle thread. 59 //////////////////////////////////////////////////////////////////////////////////////////// 60 // @ sched : local pointer on scheduler. 
61 // @ returns pointer on selected thread descriptor 62 //////////////////////////////////////////////////////////////////////////////////////////// 103 63 thread_t * sched_select( scheduler_t * sched ) 104 64 { … … 110 70 uint32_t count; 111 71 112 // take lock protecting sheduler lists113 spinlock_lock( &sched->lock );114 115 72 // first : scan the kernel threads list if not empty 116 73 if( list_is_empty( &sched->k_root ) == false ) … … 124 81 while( done == false ) 125 82 { 126 assert( (count < sched->k_threads_nr), "bad kernel threads list" ); 83 84 // check kernel threads list 85 assert( (count < sched->k_threads_nr), 86 "bad kernel threads list" ); 127 87 128 88 // get next entry in kernel list … … 140 100 141 101 // select kernel thread if non blocked and non THREAD_IDLE 142 if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) 143 { 144 spinlock_unlock( &sched->lock ); 145 return thread; 146 } 102 if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) return thread; 103 147 104 } // end loop on kernel threads 148 105 } // end kernel threads … … 159 116 while( done == false ) 160 117 { 161 assert( (count < sched->u_threads_nr), "bad user threads list" ); 118 119 // check user threads list 120 assert( (count < sched->u_threads_nr), 121 "bad user threads list" ); 162 122 163 123 // get next entry in user list … … 175 135 176 136 // select thread if non blocked 177 if( thread->blocked == 0 ) 178 { 179 spinlock_unlock( &sched->lock ); 180 return thread; 181 } 137 if( thread->blocked == 0 ) return thread; 138 182 139 } // end loop on user threads 183 140 } // end user threads 184 141 185 142 // third : return idle thread if no other runnable thread 186 spinlock_unlock( &sched->lock );187 143 return sched->idle; 188 144 189 145 } // end sched_select() 190 146 191 /////////////////////////////////////////// 192 void sched_handle_signals( core_t * core ) 147 //////////////////////////////////////////////////////////////////////////////////////////// 148 // This static function is the only function that can remove a thread from the scheduler. 149 // It is private, because it is called by the sched_yield() public function. 150 // It scans all threads attached to a given scheduler, and executes the relevant 151 // actions for pending requests: 152 // - REQ_ACK : it checks that target thread is blocked, decrements the response counter 153 // to acknowledge the client thread, and resets the pending request. 154 // - REQ_DELETE : it detaches the target thread from parent if attached, detaches it from 155 // the process, removes it from scheduler, releases memory allocated to thread descriptor, 156 // and destroys the process descriptor if the target thread was the last thread. 157 //////////////////////////////////////////////////////////////////////////////////////////// 158 // @ core : local pointer on the core descriptor. 
159 //////////////////////////////////////////////////////////////////////////////////////////// 160 static void sched_handle_signals( core_t * core ) 193 161 { … … 197 165 thread_t * thread; 198 166 process_t * process; 199 bool_t last_thread; 167 scheduler_t * sched; 168 bool_t last; 200 169 201 170 // get pointer on scheduler 202 sched uler_t * sched= &core->scheduler;171 sched = &core->scheduler; 203 172 204 173 // get pointer on user threads root 205 174 root = &sched->u_root; 206 175 207 // take lock protecting threads lists208 spinlock_lock( &sched->lock );209 210 176 // We use a while to scan the user threads, to control the iterator increment, 211 // because some threads will be destroyed, and we cannot use a LIST_FOREACH()177 // because some threads will be destroyed, and we do not want to use a LIST_FOREACH() 212 178 213 179 // initialise list iterator … … 226 192 if( thread->flags & THREAD_FLAG_REQ_ACK ) 227 193 { 228 // check thread blocked 229 assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , 230 "thread not blocked" ); 194 195 // check thread blocked 196 assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , 197 "thread not blocked" ); 231 198 232 199 // decrement response counter … … 237 204 } 238 205 239 // handle REQ_DELETE 240 if( thread->flags & THREAD_FLAG_REQ_DELETE)206 // handle REQ_DELETE only if target thread != calling thread 207 if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) ) 241 208 { 242 209 // get thread process descriptor … … 246 213 if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL; 247 214 248 // remove thread from scheduler (scheduler lock already taken) 215 // take lock protecting scheduler state 216 busylock_acquire( &sched->lock ); 217 218 // update scheduler state 249 219 uint32_t threads_nr = sched->u_threads_nr; 250 251 assert( (threads_nr != 0) , "u_threads_nr cannot be 0\n" ); 252 253 220 sched->u_threads_nr = threads_nr - 1; 254 221 list_unlink( &thread->sched_list ); … … 269 236 } 270 237 238 // release lock protecting scheduler state 239 busylock_release( &sched->lock ); 240 271 241 // delete thread descriptor 272 last _thread= thread_destroy( thread );242 last = thread_destroy( thread ); 273 243 274 244 #if DEBUG_SCHED_HANDLE_SIGNALS … … 279 249 #endif 250 // destroy process descriptor if no more threads 281 if( last _thread)251 if( last ) 282 252 { 283 253 // delete process … … 293 263 } 294 264 } 265 } // end sched_handle_signals() 266 267 //////////////////////////////////////////////////////////////////////////////////////////// 268 // This static function is called by the sched_yield function when the RPC_FIFO 269 // associated to the core is not empty. 270 // It checks whether an idle (blocked) RPC thread exists for this core, and unblocks 271 // it if found. It creates a new RPC thread if no idle RPC thread is found. 272 //////////////////////////////////////////////////////////////////////////////////////////// 273 // @ sched : local pointer on scheduler. 
274 //////////////////////////////////////////////////////////////////////////////////////////// 275 void sched_rpc_activate( scheduler_t * sched ) 276 { 277 error_t error; 278 thread_t * thread; 279 list_entry_t * iter; 280 lid_t lid = CURRENT_THREAD->core->lid; 281 bool_t found = false; 282 283 // search one IDLE RPC thread associated to the selected core 284 LIST_FOREACH( &sched->k_root , iter ) 285 { 286 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 287 if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) ) 288 { 289 // exit loop 290 found = true; 291 break; 292 } 293 } 294 295 if( found == false ) // create new RPC thread 296 { 297 error = thread_kernel_create( &thread, 298 THREAD_RPC, 299 &rpc_thread_func, 300 NULL, 301 lid ); 302 // check memory 303 if ( error ) 304 { 305 printk("\n[WARNING] in %s : no memory to create an RPC thread in cluster %x\n", 306 __FUNCTION__, local_cxy ); 307 } 308 else 309 { 310 // unblock created RPC thread 311 thread->blocked = 0; 312 313 // update RPC threads counter 314 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 ); 315 316 #if DEBUG_SCHED_RPC_ACTIVATE 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 318 if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 319 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n", 320 __FUNCTION__, thread->trdid, local_cxy, lid, cycle ); 321 #endif 322 } 323 } 324 else // RPC thread found => unblock it 325 { 326 // unblock found RPC thread 327 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE ); 328 329 #if DEBUG_SCHED_RPC_ACTIVATE 330 uint32_t cycle = (uint32_t)hal_get_cycles(); 331 if( DEBUG_SCHED_RPC_ACTIVATE < cycle ) 332 printk("\n[DBG] %s : idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n", 333 __FUNCTION__, thread->trdid, local_cxy, lid, cycle ); 334 #endif 335 336 } 337 338 } // end sched_rpc_activate() 339 340 341 342 /////////////////////////////////////////////////////////////////////////////////////////// 343 // public functions 344 /////////////////////////////////////////////////////////////////////////////////////////// 345 346 //////////////////////////////// 347 void sched_init( core_t * core ) 348 { 349 scheduler_t * sched = &core->scheduler; 350 351 sched->u_threads_nr = 0; 352 sched->k_threads_nr = 0; 353 354 sched->current = CURRENT_THREAD; 355 sched->idle = NULL; // initialized in kernel_init() 356 sched->u_last = NULL; // initialized in sched_register_thread() 357 sched->k_last = NULL; // initialized in sched_register_thread() 358 359 // initialise threads lists 360 list_root_init( &sched->u_root ); 361 list_root_init( &sched->k_root ); 362 363 // init lock 364 busylock_init( &sched->lock , LOCK_SCHED_STATE ); 365 366 sched->req_ack_pending = false; // no pending request 367 sched->trace = false; // context switches trace deactivated 368 369 } // end sched_init() 370 371 //////////////////////////////////////////// 372 void sched_register_thread( core_t * core, 373 thread_t * thread ) 374 { 375 scheduler_t * sched = &core->scheduler; 376 thread_type_t type = thread->type; 377 378 // take lock protecting scheduler state 379 busylock_acquire( &sched->lock ); 380 381 if( type == THREAD_USER ) 382 { 383 list_add_last( &sched->u_root , &thread->sched_list ); 384 sched->u_threads_nr++; 385 if( sched->u_last == NULL ) sched->u_last = &thread->sched_list; 386 } 387 else // kernel thread 388 { 389 list_add_last( &sched->k_root , &thread->sched_list ); 390 sched->k_threads_nr++; 391 if( sched->k_last == NULL ) sched->k_last = 
&thread->sched_list; 392 } 295 393 296 394 // release lock 297 hal_fence(); 298 spinlock_unlock( &sched->lock ); 299 300 } // end sched_handle_signals() 301 302 //////////////////////////////// 395 busylock_release( &sched->lock ); 396 397 } // end sched_register_thread() 398 399 ////////////////////////////////////// 303 400 void sched_yield( const char * cause ) 304 401 { 305 thread_t * next; 306 thread_t * current = CURRENT_THREAD; 307 core_t * core = current->core; 308 scheduler_t * sched = &core->scheduler; 402 thread_t * next; 403 thread_t * current = CURRENT_THREAD; 404 core_t * core = current->core; 405 lid_t lid = core->lid; 406 scheduler_t * sched = &core->scheduler; 407 remote_fifo_t * fifo = &LOCAL_CLUSTER->rpc_fifo[lid]; 309 408 310 409 #if (DEBUG_SCHED_YIELD & 0x1) 311 if( sched->trace ) 312 sched_display( core->lid ); 410 if( sched->trace ) sched_display( lid ); 313 411 #endif 314 412 315 // delay the yield if current thread has locks 316 if( (current->local_locks != 0) || (current->remote_locks != 0) ) 317 { 318 current->flags |= THREAD_FLAG_SCHED; 319 return; 320 } 321 322 // enter critical section / save SR in current thread descriptor 323 hal_disable_irq( &CURRENT_THREAD->save_sr ); 324 325 // loop on threads to select next thread 413 // check current thread busylocks counter 414 assert( (current->busylocks == 0), 415 "thread cannot yield : busylocks = %d\n", current->busylocks ); 416 417 // activate or create an RPC thread if RPC_FIFO non empty 418 if( remote_fifo_is_empty( fifo ) == false ) sched_rpc_activate( sched ); 419 420 // disable IRQs / save SR in current thread descriptor 421 hal_disable_irq( &current->save_sr ); 422 423 // take lock protecting scheduler state 424 busylock_acquire( &sched->lock ); 425 426 // select next thread 326 427 next = sched_select( sched ); 327 428 328 329 330 "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );331 332 333 334 335 336 337 338 339 next->trdid , thread_type_str(next->type) , local_cxy , core->lid );429 // check next thread kernel_stack overflow 430 assert( (next->signature == THREAD_SIGNATURE), 431 "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid ); 432 433 // check next thread attached to same core as the calling thread 434 assert( (next->core == current->core), 435 "next core %x != current core %x\n", next->core, current->core ); 436 437 // check next thread not blocked when type != IDLE 438 assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , 439 "next thread %x (%s) is blocked on core[%x,%d]\n", 440 next->trdid , thread_type_str(next->type) , local_cxy , lid ); 341 441 342 442 // switch contexts and update scheduler state if next != current 343 443 if( next != current ) 344 444 { 445 // update scheduler 446 sched->current = next; 447 if( next->type == THREAD_USER ) sched->u_last = &next->sched_list; 448 else sched->k_last = &next->sched_list; 449 450 // handle FPU ownership 451 if( next->type == THREAD_USER ) 452 { 453 if( next == current->core->fpu_owner ) hal_fpu_enable(); 454 else hal_fpu_disable(); 455 } 456 457 // release lock protecting scheduler state 458 busylock_release( &sched->lock ); 345 459 346 460 #if DEBUG_SCHED_YIELD … … 347 462 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 348 463 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 349 __FUNCTION__, local_cxy, core->lid, cause,464 __FUNCTION__, local_cxy, lid, cause, 350 465 current, thread_type_str(current->type), current->process->pid, current->trdid,next , 351 466 
thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() ); 352 467 #endif 353 468 354 // update scheduler355 sched->current = next;356 if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;357 else sched->k_last = &next->sched_list;358 359 // handle FPU ownership360 if( next->type == THREAD_USER )361 {362 if( next == current->core->fpu_owner ) hal_fpu_enable();363 else hal_fpu_disable();364 }365 366 469 // switch CPU from current thread context to new thread context 367 470 hal_do_cpu_switch( current->cpu_context, next->cpu_context ); … … 369 472 else 370 473 { 474 // release lock protecting scheduler state 475 busylock_release( &sched->lock ); 371 476 372 477 #if DEBUG_SCHED_YIELD … … 374 479 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 375 480 " thread %x (%s) (%x,%x) continue / cycle %d\n", 376 __FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),481 __FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type), 377 482 current->process->pid, current->trdid, (uint32_t)hal_get_cycles() ); 378 483 #endif … … 394 499 list_entry_t * iter; 395 500 thread_t * thread; 396 uint32_t save_sr; 397 398 assert( (lid < LOCAL_CLUSTER->cores_nr), "illegal core index %d\n", lid); 501 502 // check lid 503 assert( (lid < LOCAL_CLUSTER->cores_nr), 504 "illegal core index %d\n", lid); 399 505 400 506 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; … … 406 512 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 407 513 408 // get extended pointer on remote TXT0 chdevlock514 // get extended pointer on remote TXT0 lock 409 515 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 410 516 411 // get TXT0 lock in busy waiting mode412 remote_ spinlock_lock_busy( lock_xp , &save_sr);517 // get TXT0 lock 518 remote_busylock_acquire( lock_xp ); 413 519 414 520 nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n", … … 443 549 444 550 // release TXT0 lock 445 remote_ spinlock_unlock_busy( lock_xp , save_sr);551 remote_busylock_release( lock_xp ); 446 552 447 553 } // end sched_display() … … 452 558 { 453 559 thread_t * thread; 454 uint32_t save_sr; 455 456 // check cxy 457 bool_t undefined = cluster_is_undefined( cxy ); 458 assert( (undefined == false), "illegal cluster %x\n", cxy ); 459 460 // check lid 461 uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ); 462 assert( (lid < cores), "illegal core index %d\n", lid); 560 561 // check cxy 562 assert( (cluster_is_undefined( cxy ) == false), 563 "illegal cluster %x\n", cxy ); 564 565 // check lid 566 assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ), 567 "illegal core index %d\n", lid ); 463 568 464 569 // get local pointer on target scheduler … … 481 586 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 482 587 483 // get TXT0 lock in busy waiting mode484 remote_ spinlock_lock_busy( lock_xp , &save_sr);588 // get TXT0 lock 589 remote_busylock_acquire( lock_xp ); 485 590 486 591 // display header … … 495 600 496 601 // get relevant thead info 497 thread_type_t type = hal_remote_l w( XPTR( cxy , &thread->type ) );498 trdid_t trdid = hal_remote_l w( XPTR( cxy , &thread->trdid ) );499 uint32_t blocked = hal_remote_l w( XPTR( cxy , &thread->blocked ) );500 uint32_t flags = hal_remote_l w( XPTR( cxy , &thread->flags ) );602 thread_type_t type = hal_remote_l32 ( XPTR( cxy , &thread->type ) ); 603 trdid_t trdid = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) ); 604 uint32_t blocked = hal_remote_l32 ( XPTR( cxy , 
&thread->blocked ) ); 605 uint32_t flags = hal_remote_l32 ( XPTR( cxy , &thread->flags ) ); 501 606 process_t * process = hal_remote_lpt( XPTR( cxy , &thread->process ) ); 502 pid_t pid = hal_remote_l w( XPTR( cxy , &process->pid ) );607 pid_t pid = hal_remote_l32 ( XPTR( cxy , &process->pid ) ); 503 608 504 609 // display thread info … … 529 634 530 635 // get relevant thead info 531 thread_type_t type = hal_remote_l w( XPTR( cxy , &thread->type ) );532 trdid_t trdid = hal_remote_l w( XPTR( cxy , &thread->trdid ) );533 uint32_t blocked = hal_remote_l w( XPTR( cxy , &thread->blocked ) );534 uint32_t flags = hal_remote_l w( XPTR( cxy , &thread->flags ) );636 thread_type_t type = hal_remote_l32 ( XPTR( cxy , &thread->type ) ); 637 trdid_t trdid = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) ); 638 uint32_t blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) ); 639 uint32_t flags = hal_remote_l32 ( XPTR( cxy , &thread->flags ) ); 535 640 process_t * process = hal_remote_lpt( XPTR( cxy , &thread->process ) ); 536 pid_t pid = hal_remote_l w( XPTR( cxy , &process->pid ) );641 pid_t pid = hal_remote_l32 ( XPTR( cxy , &process->pid ) ); 537 642 538 643 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n", … … 544 649 545 650 // release TXT0 lock 546 remote_ spinlock_unlock_busy( lock_xp , save_sr);651 remote_busylock_release( lock_xp ); 547 652 548 653 } // end sched_remote_display() 549 654 655 -
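The three-step policy documented above for sched_select() uses the same circular scan for both thread lists, starting from the entry that follows the last executed thread. A simplified sketch of one such scan under the r564 locking scheme, where the caller (sched_yield()) already holds the scheduler busylock; list root handling is condensed compared to the real code:

// simplified round-robin scan over the user threads list : returns the first
// non blocked thread, or NULL so that the caller falls back to the idle thread
static thread_t * example_scan_user_list( scheduler_t * sched )
{
    list_entry_t * entry = sched->u_last;
    uint32_t       count = 0;

    while( count < sched->u_threads_nr )
    {
        // get next entry in the circular list, skipping the list root
        entry = entry->next;
        if( entry == &sched->u_root ) entry = entry->next;
        count++;

        // select this thread if it is not blocked
        thread_t * thread = LIST_ELEMENT( entry , thread_t , sched_list );
        if( thread->blocked == 0 ) return thread;
    }
    return NULL;
}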
trunk/kernel/kern/scheduler.h
r470 r564 2 2 * scheduler.h - Core scheduler definition. 3 3 * 4 * Author Alain Greiner (2016 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 27 27 #include <hal_kernel_types.h> 28 28 #include <list.h> 29 #include < spinlock.h>29 #include <busylock.h> 30 30 31 31 /**** Forward declarations ****/ … … 40 40 typedef struct scheduler_s 41 41 { 42 spinlock_t lock; /*! lock protecting lists of threads*/42 busylock_t lock; /*! lock protecting scheduler state */ 43 43 uint16_t u_threads_nr; /*! total number of attached user threads */ 44 44 uint16_t k_threads_nr; /*! total number of attached kernel threads */ … … 61 61 /********************************************************************************************* 62 62 * This function atomically registers a new thread in a given core scheduler. 63 * Note: There is no specific sched_remove_thread(), as a thread is always deleted 64 * by the sched_handle_signals() function, called by the sched_yield() function. 63 65 ********************************************************************************************* 64 66 * @ core : local pointer on the core descriptor. … … 70 72 /********************************************************************************************* 71 73 * This function is the only method to make a context switch. It is called in case of TICK, 72 * or when when a thread explicitely requires a scheduling. 73 * It handles the pending signals for all threads attached to the core running the calling 74 * thread, and calls the sched_select() function to select a new thread. 75 * The cause argument is only used for debug by the sched_display() function, and 76 * indicates the scheduling cause. 74 * or when a thread explicitly requires to be descheduled. 75 * It takes the scheduler busylock to atomically update the scheduler state. 76 * It calls the sched_select() private function to select a new thread. After switch, it 77 * calls the sched_handle_signals() private function to handle the pending REQ_ACK and 78 * REQ_DELETE flags for all threads attached to the scheduler: it deletes all threads 79 * marked for delete (and the process descriptor when the deleted thread is the main thread). 80 * As the REQ_DELETE flag can be asynchronously set (between the select and the handle), 81 * the sched_handle_signals() function checks that the thread to delete is not the new thread, 82 * because a thread cannot delete itself. 83 * The cause argument is only used for debug by the sched_display() functions, and indicates 84 * the scheduling cause. 
… … 80 88 void sched_yield( const char * cause ); 81 89 82 /*********************************************************************************************83 * This function scan all threads attached to a given scheduler, and executes the relevant84 * actions for pending THREAD_FLAG_REQ_ACK or THREAD_FLAG_REQ_DELETE requests.85 * It is called in by the sched_yield() function, with IRQ disabled.86 * - REQ_ACK : it checks that target thread is blocked, decrements the response counter87 * to acknowledge the client thread, and reset the pending request.88 * - REQ_DELETE : it detach the target thread from parent if attached, detach it from89 * the process, remove it from scheduler, release memory allocated to thread descriptor,90 * and destroy the process descriptor it the target thread was the last thread.91 *********************************************************************************************92 * @ core : local pointer on the core descriptor.93 ********************************************************************************************/94 void sched_handle_signals( struct core_s * core );95 96 /*********************************************************************************************97 * This function does NOT modify the scheduler state.98 * It just select a thread in the list of attached threads, implementing the following99 * three steps policy:100 * 1) It scan the list of kernel threads, from the next thread after the last executed one,101 * and returns the first runnable found : not IDLE, not blocked, client queue not empty.102 * It can be the current thread.103 * 2) If no kernel thread found, it scan the list of user thread, from the next thread after104 * the last executed one, and returns the first runable found : not blocked.105 * It can be the current thread.106 * 3) If no runable thread found, it returns the idle thread.107 *********************************************************************************************108 * @ core : local pointer on scheduler.109 * @ returns pointer on selected thread descriptor110 ********************************************************************************************/111 struct thread_s * sched_select( struct scheduler_s * sched );112 113 90 /********************************************************************************************* 114 91 * This debug function displays on TXT0 the internal state of a local scheduler, 115 * identified by the core local index <lid>. 92 * identified by the core local index <lid>. It must be called by a local thread. 116 93 ********************************************************************************************* 117 94 * @ lid : local index of target core. … … 123 100 * identified by the target cluster identifier <cxy> and the core local index <lid>. 124 101 * It can be called by a thread running in any cluster, as it uses remote accesses, 125 * to scan the scheduler l ocal lists of threads.102 * to scan the scheduler lists of threads. 126 103 ********************************************************************************************* 127 104 * @ cxy : target cluster identifier -
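The contract documented above (sched_yield() asserts that the calling thread holds no busylock, and thread.h defines THREAD_BLOCKED_LOCK for threads waiting on a queuelock or rwlock) implies a canonical shape for any blocking wait primitive. A hedged sketch, not code from this changeset; the function name and the queue lock argument are illustrative:

// hypothetical blocking wait : a thread must release all busylocks before
// descheduling, because a descheduled busylock owner would leave other
// cores spinning forever on that lock
void example_wait( xptr_t queue_lock_xp )
{
    thread_t * this = CURRENT_THREAD;

    // verify that the calling thread holds no busylock (kernel panic otherwise)
    thread_assert_can_yield( this , __FUNCTION__ );

    // mark the thread blocked, under the busylock protecting the waiting queue
    remote_busylock_acquire( queue_lock_xp );
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_LOCK );
    remote_busylock_release( queue_lock_xp );

    // no busylock is held at this point : the thread can deschedule
    sched_yield( "blocked on queuelock" );
}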
trunk/kernel/kern/thread.c
r531 r564 1 1 /* 2 * thread.c - implementation of thread operations(user & kernel)2 * thread.c - thread operations implementation (user & kernel) 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 48 48 ////////////////////////////////////////////////////////////////////////////////////// 49 49 50 extern process_t process_zero; 50 extern process_t process_zero; // allocated in kernel_init.c 51 extern char * lock_type_str[]; // allocated in kernel_init.c 52 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 51 53 52 54 ////////////////////////////////////////////////////////////////////////////////////// … … 145 147 cluster_t * local_cluster = LOCAL_CLUSTER; 146 149 147 #if DEBUG_THREAD_ USER_INIT149 uint32_t cycle = (uint32_t)hal_get_cycles(); 149 if( DEBUG_THREAD_USER_INIT < cycle ) 150 printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n", 151 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle ); 152 #endif 153 154 // register new thread in process descriptor, and get a TRDID 155 thread->type = type; // needed by process_register_thread. 156 error = process_register_thread( process, thread , &trdid ); 157 158 if( error ) 159 { 160 printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ ); 161 return EINVAL; 162 } 151 if( DEBUG_THREAD_INIT < cycle ) 152 printk("\n[DBG] %s : thread %x in process %x enter for thread %x in process %x / cycle %d\n", 153 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 154 thread, process->pid , cycle ); 155 #endif 163 156 164 157 // compute thread descriptor size without kernel stack … … 166 159 167 160 // Initialize new thread descriptor 168 thread->trdid = trdid;161 thread->type = type; 169 162 thread->quantum = 0; // TODO 170 163 thread->ticks_nr = 0; // TODO … … 173 166 thread->process = process; 174 167 175 thread->local_locks = 0; 176 thread->remote_locks = 0; 177 178 #if CONFIG_LOCKS_DEBUG 179 list_root_init( &thread->locks_root ); 180 xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) ); 168 thread->busylocks = 0; 169 170 #if DEBUG_BUSYLOCK 171 xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) ); 181 172 #endif 182 173 … … 194 185 thread->blocked = THREAD_BLOCKED_GLOBAL; 195 186 196 // reset sched list 187 // register new thread in process descriptor, and get a TRDID 188 error = process_register_thread( process, thread , &trdid ); 189 190 if( error ) 191 { 192 printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ ); 193 return EINVAL; 194 } 195 196 // initialize trdid 197 thread->trdid = trdid; 198 199 // initialize sched list 197 200 list_entry_init( &thread->sched_list ); 198 201 199 // reset thread info 202 // initialize waiting queue entries 203 list_entry_init( &thread->wait_list ); 204 xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) ); 205 206 // initialize thread info 200 207 memset( &thread->info , 0 , sizeof(thread_info_t) ); 201 208 202 // initialize sjoin_lock203 remote_ spinlock_init( XPTR( local_cxy , &thread->join_lock ));209 // initialize join_lock 210 remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN ); 204 211 205 212 // initialise signature … … 216 223 dqdt_update_threads( 1 ); 217 224 218 #if DEBUG_THREAD_ USER_INIT225 cycle = (uint32_t)hal_get_cycles(); 220 226 if( DEBUG_THREAD_USER_INIT < cycle ) 221 
printk("\n[DBG] %s : thread %x exit after init of thread %x in process %x / cycle %d\n", 222 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle ); 227 if( DEBUG_THREAD_INIT < cycle ) 228 printk("\n[DBG] %s : thread %x in process %x exit for thread %x in process %x / cycle %d\n", 229 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 230 thread, process->pid , cycle ); 223 231 #endif 224 232 … … 436 444 args = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args )); 437 445 base = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base )); 438 size = (uint32_t)hal_remote_l w( XPTR( parent_cxy , &parent_ptr->u_stack_size ));439 flags = hal_remote_l w( XPTR( parent_cxy , &parent_ptr->flags ));446 size = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size )); 447 flags = hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags )); 440 448 uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current )); 441 449 … … 474 482 } 475 483 484 #if (DEBUG_THREAD_USER_FORK & 1) 485 if( DEBUG_THREAD_USER_FORK < cycle ) 486 printk("\n[DBG] %s : thread %x in process %x / initialised thread %x in process %x\n", 487 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 488 child_ptr->trdid, child_process->pid ); 489 #endif 490 476 491 // return child pointer 477 492 *child_thread = child_ptr; … … 502 517 } 503 518 504 // create and initialize STACK vseg 519 #if (DEBUG_THREAD_USER_FORK & 1) 520 if( DEBUG_THREAD_USER_FORK < cycle ) 521 printk("\n[DBG] %s : thread %x in process %x / created CPU & FPU contexts\n", 522 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 523 #endif 524 525 // create and initialize STACK vseg 505 526 vseg = vseg_alloc(); 506 527 vseg_init( vseg, … … 514 535 515 536 // register STACK vseg in local child VSL 516 vseg_attach( &child_process->vmm , vseg ); 537 vmm_vseg_attach( &child_process->vmm , vseg ); 538 539 #if (DEBUG_THREAD_USER_FORK & 1) 540 if( DEBUG_THREAD_USER_FORK < cycle ) 541 printk("\n[DBG] %s : thread %x in process %x / created stack vseg\n", 542 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid ); 543 #endif 517 544 518 545 // copy all valid STACK GPT entries … … 530 557 if( error ) 531 558 { 532 v seg_detach(vseg );559 vmm_vseg_detach( &child_process->vmm , vseg ); 533 560 vseg_free( vseg ); 534 561 thread_release( child_ptr ); … … 549 576 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 550 577 551 // increment the forks counter 552 remote_spinlock_lock( lock_xp ); 578 // get lock protecting page 579 remote_busylock_acquire( lock_xp ); 580 581 // increment the forks counter in page descriptor 553 582 hal_remote_atomic_add( forks_xp , 1 ); 554 remote_spinlock_unlock( lock_xp ); 583 584 // release lock protecting page 585 remote_busylock_release( lock_xp ); 555 586 556 587 #if (DEBUG_THREAD_USER_FORK & 1) … … 559 590 printk("\n[DBG] %s : thread %x in process %x copied one PTE to child GPT : vpn %x / forks %d\n", 560 591 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, vpn, 561 hal_remote_l w( XPTR( page_cxy , &page_ptr->forks) ) );592 hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) ); 562 593 #endif 563 594 … … 596 627 #endif 597 628 598 assert( (thread->type == THREAD_USER ) , "bad type" ); 599 assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );600 assert( (thread->local_locks == 0) , "bad local locks" );601 assert( (thread->remote_locks == 0) , "bad remotelocks" );629 // check parent 
thread attributes 630 assert( (thread->type == THREAD_USER ) , "bad type" ); 631 assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" ); 632 assert( (thread->busylocks == 0) , "bad busylocks" ); 602 633 603 634 // re-initialize various thread descriptor fields … … 605 636 thread->ticks_nr = 0; // TODO 606 637 thread->time_last_check = 0; // TODO 607 608 #if CONFIG_LOCKS_DEBUG609 list_root_init( &thread->locks_root );610 xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );611 #endif612 638 613 639 thread->entry_func = entry_func; … … 622 648 thread->fork_cxy = 0; // not inherited 623 649 650 // re-initialize busylocks counters 651 thread->busylocks = 0; 652 624 653 // reset thread info 625 654 memset( &thread->info , 0 , sizeof(thread_info_t) ); 626 655 627 // initialize join_lock628 remote_ spinlock_init( XPTR( local_cxy , &thread->join_lock ));656 // re-initialize join_lock 657 remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN ); 629 658 630 659 // allocate an user stack vseg for main thread … … 664 693 hal_cpu_context_exec( thread ); 665 694 666 assert( false, "we should execute this code");695 assert( false, "we should not execute this code"); 667 696 668 697 return 0; … … 742 771 lid_t core_lid ) 743 772 { 744 assert( (type == THREAD_IDLE) , "illegal thread type" ); 745 assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" ); 773 774 // check arguments 775 assert( (type == THREAD_IDLE) , "illegal thread type" ); 776 assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" ); 746 777 747 778 // initialize thread descriptor … … 784 815 #endif 785 816 786 assert( (thread->local_locks == 0) , 787 "local lock not released for thread %x in process %x", thread->trdid, process->pid ); 788 789 assert( (thread->remote_locks == 0) , 790 "remote lock not released for thread %x in process %x", thread->trdid, process->pid ); 817 // check busylocks counter 818 assert( (thread->busylocks == 0) , 819 "busylock not released for thread %x in process %x", thread->trdid, process->pid ); 791 820 792 821 // update intrumentation values … … 890 919 } // thread_reset_req_ack() 891 920 892 ////////////////////////////////893 inline bool_t thread_can_yield( void )894 {895 thread_t * this = CURRENT_THREAD;896 return (this->local_locks == 0) && (this->remote_locks == 0);897 }898 899 /////////////////////////900 void thread_check_sched( void )901 {902 thread_t * this = CURRENT_THREAD;903 904 if( (this->local_locks == 0) &&905 (this->remote_locks == 0) &&906 (this->flags & THREAD_FLAG_SCHED) )907 {908 this->flags &= ~THREAD_FLAG_SCHED;909 sched_yield( "delayed scheduling" );910 }911 912 } // end thread_check_sched()913 914 921 ////////////////////////////////////// 915 922 void thread_block( xptr_t thread_xp, … … 930 937 printk("\n[DBG] %s : thread %x in process %x blocked thread %x in process %x / cause %x\n", 931 938 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 932 ptr->trdid, hal_remote_l w(XPTR( cxy , &process->pid )), cause );939 ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause ); 933 940 #endif 934 941 … … 953 960 printk("\n[DBG] %s : thread %x in process %x unblocked thread %x in process %x / cause %x\n", 954 961 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 955 ptr->trdid, hal_remote_l w(XPTR( cxy , &process->pid )), cause );962 ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause ); 956 963 #endif 957 964 … … 974 981 thread_t * target_ptr; // pointer on target thread 
975 982 xptr_t target_flags_xp; // extended pointer on target thread <flags> 976 uint32_t target_flags; // target thread <flags> value977 983 xptr_t target_join_lock_xp; // extended pointer on target thread <join_lock> 978 984 xptr_t target_join_xp_xp; // extended pointer on target thread <join_xp> … … 982 988 thread_t * joining_ptr; // pointer on joining thread 983 989 cxy_t joining_cxy; // joining thread cluster 984 cxy_t owner_cxy; // process owner cluster 985 986 987 // get target thread pointers, identifiers, and flags 990 991 // get target thread cluster and local pointer 988 992 target_cxy = GET_CXY( target_xp ); 989 993 target_ptr = GET_PTR( target_xp ); 990 target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) ); 994 995 // get target thread identifiers, and attached flag 996 target_trdid = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) ); 991 997 target_ltid = LTID_FROM_TRDID( target_trdid ); 992 998 target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 993 target_ flags = hal_remote_lw( target_flags_xp);999 target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 ); 994 1000 995 1001 // get killer thread pointers … … 998 1004 999 1005 #if DEBUG_THREAD_DELETE 1000 1006 uint32_t cycle = (uint32_t)hal_get_cycles ;1006 uint32_t cycle = (uint32_t)hal_get_cycles(); 1001 1007 if( DEBUG_THREAD_DELETE < cycle ) 1002 printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n", 1003 __FUNCTION__, killer_ptr, target_ptr, cycle ); 1004 #endif 1005 1006 // target thread cannot be the main thread, because the main thread 1007 // must be deleted by the parent process sys_wait() function 1008 owner_cxy = CXY_FROM_PID( pid ); 1009 assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), 1010 "tharget thread cannot be the main thread\n" ); 1008 printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n", 1009 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle ); 1010 #endif 1011 1012 // check killer thread can yield 1013 assert( (killer_ptr->busylocks == 0), 1014 "cannot yield : busylocks = %d\n", killer_ptr->busylocks ); 1015 1016 // check target thread is not the main thread, because the main thread 1017 // must be deleted by the parent process sys_wait() function 1018 assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)), 1019 "target thread cannot be the main thread\n" ); 1011 1020 1012 1021 // block the target thread 1013 1022 thread_block( target_xp , THREAD_BLOCKED_GLOBAL ); 1014 1023 1015 // get attached from target flag descriptor 1016 target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0); 1017 1018 // synchronize with the joining thread if the target thread is attached 1019 if( target_attached && (is_forced == false) ) 1020 { 1024 // synchronize with the joining thread if attached 1025 if( target_attached && (is_forced == false) ) 1026 { 1027 1028 #if (DEBUG_THREAD_DELETE & 1) 1029 if( DEBUG_THREAD_DELETE < cycle ) 1030 printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n", 1031 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid ); 1032 #endif 1021 1033 // build extended pointers on target thread join fields 1022 1034 target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock ); … … 1027 1039 1028 1040 // take the join_lock in target thread descriptor 1029 remote_ spinlock_lock( target_join_lock_xp );1041 remote_busylock_acquire( target_join_lock_xp ); 1030 1042 1031 1043 // get 
join_done from target thread descriptor 1032 target_join_done = ((hal_remote_l w( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);1044 target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0); 1033 1045 1034 1046 if( target_join_done ) // joining thread arrived first => unblock the joining thread 1035 1047 { 1048 1049 #if (DEBUG_THREAD_DELETE & 1) 1050 if( DEBUG_THREAD_DELETE < cycle ) 1051 printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n", 1052 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid ); 1053 #endif 1036 1054 // get extended pointer on joining thread 1037 joining_xp = (xptr_t)hal_remote_l wd( target_join_xp_xp );1055 joining_xp = (xptr_t)hal_remote_l64( target_join_xp_xp ); 1038 1056 joining_ptr = GET_PTR( joining_xp ); 1039 1057 joining_cxy = GET_CXY( joining_xp ); … … 1046 1064 1047 1065 // release the join_lock in target thread descriptor 1048 remote_spinlock_unlock( target_join_lock_xp ); 1066 remote_busylock_release( target_join_lock_xp ); 1067 1068 // set the REQ_DELETE flag in target thread descriptor 1069 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1049 1070 1050 1071 // restore IRQs 1051 1072 hal_restore_irq( save_sr ); 1052 1073 } 1053 else // thisthread arrived first => register flags and deschedule1074 else // killer thread arrived first => register flags and deschedule 1054 1075 { 1076 1077 #if (DEBUG_THREAD_DELETE & 1) 1078 if( DEBUG_THREAD_DELETE < cycle ) 1079 printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n", 1080 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid ); 1081 #endif 1055 1082 // set the kill_done flag in target thread 1056 1083 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE ); … … 1060 1087 1061 1088 // set extended pointer on killer thread in target thread 1062 hal_remote_s wd( target_join_xp_xp , killer_xp );1089 hal_remote_s64( target_join_xp_xp , killer_xp ); 1063 1090 1064 1091 // release the join_lock in target thread descriptor 1065 remote_spinlock_unlock( target_join_lock_xp ); 1066 1092 remote_busylock_release( target_join_lock_xp ); 1093 1094 #if (DEBUG_THREAD_DELETE & 1) 1095 if( DEBUG_THREAD_DELETE < cycle ) 1096 printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n", 1097 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid ); 1098 #endif 1067 1099 // deschedule 1068 1100 sched_yield( "killer thread wait joining thread" ); 1101 1102 #if (DEBUG_THREAD_DELETE & 1) 1103 if( DEBUG_THREAD_DELETE < cycle ) 1104 printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n", 1105 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid ); 1106 #endif 1107 // set the REQ_DELETE flag in target thread descriptor 1108 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1069 1109 1070 1110 // restore IRQs 1071 1111 hal_restore_irq( save_sr ); 1072 1112 } 1073 } // end if attached 1074 1075 // set the REQ_DELETE flag in target thread descriptor 1076 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1113 } 1114 else // target thread not attached 1115 { 1116 // set the REQ_DELETE flag in target thread descriptor 1117 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1118 } 1077 1119 1078 1120 #if DEBUG_THREAD_DELETE 1079 1121 cycle = (uint32_t)hal_get_cycles; 1080 1122 if( DEBUG_THREAD_DELETE < cycle ) 1081 printk("\n[DBG] %s : killer thread %x exit fortarget thread %x / cycle %d\n",1082 __FUNCTION__, killer_ptr , target_ptr, cycle 
);1123 printk("\n[DBG] %s : thread %x in process %x exit / target thread %x / cycle %d\n", 1124 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle ); 1083 1125 #endif 1084 1126 … … 1087 1129 1088 1130 1089 /////////////////////// 1131 ///////////////////////////// 1090 1132 void thread_idle_func( void ) 1091 1133 { 1092 1093 #if DEBUG_THREAD_IDLE1094 uint32_t cycle;1095 #endif1096 1097 1134 while( 1 ) 1098 1135 { … … 1104 1141 { 1105 1142 1106 #if (DEBUG_THREAD_IDLE & 1) 1107 cycle = (uint32_t)hal_get_cycles; 1143 #if DEBUG_THREAD_IDLE 1144 { 1145 uint32_t cycle = (uint32_t)hal_get_cycles(); 1108 1146 if( DEBUG_THREAD_IDLE < cycle ) 1109 1147 printk("\n[DBG] %s : idle thread on core[%x,%d] goes to sleep / cycle %d\n", 1110 1148 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle ); 1149 } 1111 1150 #endif 1112 1151 1113 1152 hal_core_sleep(); 1114 1153 1115 #if (DEBUG_THREAD_IDLE & 1) 1116 cycle = (uint32_t)hal_get_cycles; 1154 #if DEBUG_THREAD_IDLE 1155 { 1156 uint32_t cycle = (uint32_t)hal_get_cycles(); 1117 1157 if( DEBUG_THREAD_IDLE < cycle ) 1118 1158 printk("\n[DBG] %s : idle thread on core[%x,%d] wake up / cycle %d\n", 1119 1159 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle ); 1160 } 1120 1161 #endif 1121 1162 … … 1123 1164 1124 1165 #if DEBUG_THREAD_IDLE 1166 { 1167 uint32_t cycle = (uint32_t)hal_get_cycles(); 1168 if( DEBUG_THREAD_IDLE < cycle ) 1125 1169 sched_display( CURRENT_THREAD->core->lid ); 1170 } 1126 1171 #endif 1127 1128 1172 // search a runable thread 1129 sched_yield( "IDLE" ); 1130 } 1173 sched_yield( "running idle thread" ); 1174 1175 } // end while 1176 1131 1177 } // end thread_idle() 1132 1178 … … 1134 1180 /////////////////////////////////////////// 1135 1181 void thread_time_update( thread_t * thread, 1136 uint32_tis_user )1182 bool_t is_user ) 1137 1183 { 1138 1184 cycle_t current_cycle; // current cycle counter value … … 1154 1200 if( is_user ) info->usr_cycles += (current_cycle - last_cycle); 1155 1201 else info->sys_cycles += (current_cycle - last_cycle); 1156 } 1202 1203 } // end thread_time_update() 1157 1204 1158 1205 ///////////////////////////////////// … … 1174 1221 1175 1222 // check trdid argument 1176 if( (target_thread_ltid >= CONFIG_THREAD _MAX_PER_CLUSTER) ||1223 if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) || 1177 1224 cluster_is_undefined( target_cxy ) ) return XPTR_NULL; 1178 1225 … … 1182 1229 sizeof(xlist_entry_t) ); 1183 1230 1184 // get extended pointer on lock protecting the list of processes1231 // get extended pointer on lock protecting the list of local processes 1185 1232 lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock ); 1186 1233 1187 1234 // take the lock protecting the list of processes in target cluster 1188 remote_ spinlock_lock( lock_xp );1189 1190 // loop on list of process in target cluster to find the PID process1235 remote_queuelock_acquire( lock_xp ); 1236 1237 // scan the list of local processes in target cluster 1191 1238 xptr_t iter; 1192 1239 bool_t found = false; … … 1195 1242 target_process_xp = XLIST_ELEMENT( iter , process_t , local_list ); 1196 1243 target_process_ptr = GET_PTR( target_process_xp ); 1197 target_process_pid = hal_remote_l w( XPTR( target_cxy , &target_process_ptr->pid ) );1244 target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) ); 1198 1245 if( target_process_pid == pid ) 1199 1246 { … … 1204 1251 1205 1252 // release the lock protecting the list of processes in target cluster 1206 remote_ 
spinlock_unlock( lock_xp );1253 remote_queuelock_release( lock_xp ); 1207 1254 1208 1255 // check PID found … … 1216 1263 1217 1264 return XPTR( target_cxy , target_thread_ptr ); 1265 1266 } // end thread_get_xptr() 1267 1268 /////////////////////////////////////////////////// 1269 void thread_assert_can_yield( thread_t * thread, 1270 const char * func_str ) 1271 { 1272 // does nothing if thread does not hold any busylock 1273 1274 if( thread->busylocks ) 1275 { 1276 // get pointers on TXT0 chdev 1277 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 1278 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 1279 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 1280 1281 // get extended pointer on TXT0 lock 1282 xptr_t txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 1283 1284 // get TXT0 lock 1285 remote_busylock_acquire( txt0_lock_xp ); 1286 1287 // display error message on TXT0 1288 nolock_printk("\n[PANIC] in %s / thread %x in process %x [%x] cannot yield : " 1289 "%d busylock(s) / cycle %d\n", 1290 func_str, thread->trdid, thread->process->pid, thread, 1291 thread->busylocks, (uint32_t)hal_get_cycles() ); 1292 1293 #if DEBUG_BUSYLOCK 1294 if( XPTR( local_cxy , thread ) == DEBUG_BUSYLOCK_THREAD_XP ) 1295 { 1296 // get root of list of taken busylocks 1297 xptr_t root_xp = XPTR( local_cxy , &thread->busylocks_root ); 1298 xptr_t iter_xp; 1299 1300 // scan list of busylocks 1301 XLIST_FOREACH( root_xp , iter_xp ) 1302 { 1303 xptr_t lock_xp = XLIST_ELEMENT( iter_xp , busylock_t , xlist ); 1304 cxy_t lock_cxy = GET_CXY( lock_xp ); 1305 busylock_t * lock_ptr = GET_PTR( lock_xp ); 1306 uint32_t lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) ); 1307 nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy ); 1308 } 1309 } 1310 #endif 1311 1312 // release TXT0 lock 1313 remote_busylock_release( txt0_lock_xp ); 1314 1315 // suicide 1316 hal_core_sleep(); 1317 } 1318 } // end thread_assert_can_yield() 1319 1320 #if DEBUG_BUSYLOCK 1321 1322 //////////////////////////////////////////////////// 1323 void thread_display_busylocks( uint32_t lock_type, 1324 bool_t is_acquire ) 1325 { 1326 xptr_t iter_xp; 1327 1328 // get cluster and local pointer of target thread 1329 cxy_t thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP ); 1330 thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP ); 1331 1332 // get extended pointer on root of busylocks 1333 xptr_t root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root ); 1334 1335 // get pointers on TXT0 chdev 1336 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 1337 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 1338 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 1339 1340 // get extended pointer on remote TXT0 lock 1341 xptr_t txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 1342 1343 // get TXT0 lock 1344 remote_busylock_acquire( txt0_lock_xp ); 1345 1346 if( is_acquire ) 1347 { 1348 nolock_printk("\n### thread [%x,%x] ACQUIRE lock %s / root %x / locks :\n", 1349 thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) ); 1350 } 1351 else 1352 { 1353 nolock_printk("\n### thread [%x,%x] RELEASE lock %s / root %x / locks :\n", 1354 thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) ); 1355 } 1356 1357 int i; 1358 1359 XLIST_FOREACH( root_xp , iter_xp ) 1360 { 1361 xptr_t ilock_xp = XLIST_ELEMENT( iter_xp , busylock_t , xlist ); 1362 cxy_t ilock_cxy = GET_CXY( ilock_xp ); 1363 busylock_t * ilock_ptr = GET_PTR( ilock_xp ); 1364 uint32_t ilock_type = hal_remote_l32( XPTR( ilock_cxy , &ilock_ptr->type ) ); 1365 nolock_printk(" - %s in cluster 
%x\n", lock_type_str[ilock_type] , ilock_cxy ); 1366 } 1367 1368 // release TXT0 lock 1369 remote_busylock_release( txt0_lock_xp ); 1370 } 1371 #endif -
trunk/kernel/kern/thread.h
r527 r564
3 3 *
4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016)
5 * Alain Greiner (2016,2017,2018)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
… …
32 32 #include <list.h>
33 33 #include <hal_context.h>
34 #include <spinlock.h>
34 #include <remote_busylock.h>
35 35 #include <core.h>
36 36 #include <chdev.h>
… …
92 92 #define THREAD_BLOCKED_ISR 0x0400 /*! thread DEV wait ISR */
93 93 #define THREAD_BLOCKED_WAIT 0x0800 /*! thread wait child process termination */
94 #define THREAD_BLOCKED_LOCK 0x1000 /*! thread wait queuelock or rwlock */
94 95
95 96 /***************************************************************************************
… …
119 120 * This TRDID is computed by the process_register_thread() function, when the user
120 121 * thread is registered in the local copy of the process descriptor.
121 * WARNING : Don't modify the first 4 fields order, as this order is used by the
122 * hal_kentry assembly code for the TSAR architecture.
122 *
123 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the
124 * hal_kentry assembly code for some architectures (TSAR).
125 *
126 * WARNING (2) Most of the thread state is private and accessed only by this thread,
127 * but some fields are shared, and can be modified by other threads.
128 * - the "blocked" bit_vector can be modified by another thread
129 * running in another cluster (using atomic instructions),
130 * to change this thread scheduling status.
131 * - the "flags" bit_vector can be modified by another thread
132 * running in another cluster (using atomic instructions),
133 * to register requests such as ACK or DELETE.
134 * - the "join_xp" field can be modified by the joining thread,
135 * and this rendez-vous is protected by the dedicated "join_lock".
136 *
137 * WARNING (3) When this thread is blocked on a shared resource (queuelock, condvar,
138 * or chdev), it registers in the associated waiting queue, using the
139 * "wait_list" (local list) or "wait_xlist" (trans-cluster list) fields.
123 140 **************************************************************************************/
… …
144 161 xptr_t parent; /*! extended pointer on parent thread */
145 162
146 remote_spinlock_t join_lock; /*! lock protecting the join/exit */
163 remote_busylock_t join_lock; /*! lock protecting the join/exit */
147 164 xptr_t join_xp; /*! joining/killer thread extended pointer */
148 165
… …
180 197 cxy_t rpc_client_cxy; /*! client cluster index (for a RPC thread) */
181 198
182 xlist_entry_t wait_list; /*! member of threads blocked on same cond */
183
184 list_entry_t locks_root; /*! root of list of locks taken */
185 xlist_entry_t xlocks_root; /*! root of xlist of remote locks taken */
186 uint32_t local_locks; /*! number of local locks owned by thread */
187 uint32_t remote_locks; /*! number of remote locks owned by thread */
199 list_entry_t wait_list; /*! member of a local waiting queue */
200 xlist_entry_t wait_xlist; /*! member of a trans-cluster waiting queue */
201
202 uint32_t busylocks; /*! number of taken busylocks */
203
204 #if DEBUG_BUSYLOCK
205 xlist_entry_t busylocks_root; /*! root of xlist of taken busylocks */
206 #endif
188 207
189 208 thread_info_t info; /*! embedded thread_info_t */
… …
311 330
312 331 /***************************************************************************************
313 * This function is called by the sched_handle_signals() function to releases
332 * This low-level function is called by the sched_handle_signals() function to release
314 333 * the physical memory allocated for a thread in a given cluster, when this thread
315 334 * is marked for delete. This includes the thread descriptor itself, the associated
… …
363 382 **************************************************************************************/
364 383 void thread_reset_req_ack( thread_t * target );
365
366 /***************************************************************************************
367 * This function checks if the calling thread can deschedule.
368 ***************************************************************************************
369 * @ returns true if no locks taken.
370 **************************************************************************************/
371 inline bool_t thread_can_yield( void );
372
373 /***************************************************************************************
374 * This function implements the delayed descheduling mechanism : It is called by
375 * all lock release functions, and calls the sched_yield() function when all locks
376 * have been released and the calling thread THREAD_FLAG_SCHED flag is set.
377 **************************************************************************************/
378 void thread_check_sched( void );
379 384
380 385 /***************************************************************************************
… …
417 422 * thread descriptor identified by the <thread_xp> argument.
418 423 * We need an extended pointer, because the client thread of an I/O operation on a
419 * given device is not in the same cluster as the associated device descriptor.
424 * given device is generally not in the same cluster as the associated server thread.
420 425 * WARNING : this function does not reschedule the remote thread.
421 426 * The scheduling can be forced by sending an IPI to the core running the remote thread.
… …
432 437 ***************************************************************************************
433 438 * @ thread : local pointer on target thread.
434 * @ is_user : update user time if non zero / update kernel time if zero
439 * @ is_user : update user time if true / update kernel time if false
435 440 **************************************************************************************/
436 441 void thread_time_update( thread_t * thread,
437 uint32_t is_user );
442 bool_t is_user );
438 443
439 444 /***************************************************************************************
… …
449 454 trdid_t trdid );
450 455
456 /***************************************************************************************
457 * This function checks that the thread identified by the <thread> argument does not
458 * hold any busylock (local or remote).
459 * If the xlist of taken busylocks is not empty, it displays the set of taken locks,
460 * and makes a kernel panic.
461 ***************************************************************************************
462 * @ thread : local pointer on target thread.
463 * @ func_str : faulty function name.
464 **************************************************************************************/
465 void thread_assert_can_yield( thread_t * thread,
466 const char * func_str );
467
468 /***************************************************************************************
469 * This debug function displays the list of busylocks currently owned by a thread
470 * identified by the DEBUG_BUSYLOCK_THREAD_XP parameter.
471 * It is called each time the target thread acquires or releases a busylock
472 * (local or remote). It is never called when DEBUG_BUSYLOCK_THREAD_XP == 0.
473 ***************************************************************************************
474 * @ lock_type : type of acquired / released busylock.
475 * @ is_acquire : change is an acquire when true / change is a release when false.
476 **************************************************************************************/
477 void thread_display_busylocks( uint32_t lock_type,
478 bool_t is_acquire );
479
480
451 481
452 482 #endif /* _THREAD_H_ */
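A note on the new <busylocks> counter and the DEBUG_BUSYLOCK xlist declared above: the prototypes imply that every busylock acquire increments a per-thread count (and, for the thread selected by DEBUG_BUSYLOCK_THREAD_XP, links the lock into the per-thread xlist), so that thread_assert_can_yield() can refuse to deschedule a lock-holding thread. The sketch below shows that bookkeeping under stated assumptions: busylock_note_acquire() is a hypothetical name, the real code belongs to the busylock implementation that is not part of this hunk, and only the local-lock case is shown.

// hypothetical acquire-side bookkeeping (not part of this diff)
static void busylock_note_acquire( thread_t * this , busylock_t * lock )
{
    // one more busylock taken : thread_assert_can_yield() will now panic
    // if this thread reaches a descheduling point before the release
    this->busylocks++;

#if DEBUG_BUSYLOCK
    // register the lock in the per-thread xlist scanned by the panic dump,
    // and trace the acquire, for the selected thread only
    if( XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP )
    {
        xlist_add_last( XPTR( local_cxy , &this->busylocks_root ),
                        XPTR( local_cxy , &lock->xlist ) );
        thread_display_busylocks( lock->type , true );
    }
#endif
}

The release side would mirror this sketch: decrement <busylocks>, unlink the lock from the xlist, and call thread_display_busylocks() with is_acquire == false.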