Changeset 296 for trunk/kernel
Timestamp: Jul 31, 2017, 1:59:52 PM
Location:  trunk/kernel
Files:     30 edited
trunk/kernel/devices/dev_dma.c
r279 → r296

     // start server thread
+    thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE );
     thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
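Note: this hunk (repeated in dev_ioc.c, dev_nic.c and dev_txt.c below) changes the
server-thread startup protocol. A minimal sketch of the intent, assuming — as the
rest of this changeset suggests — that the server thread is created blocked on
THREAD_BLOCKED_GLOBAL and that thread->blocked is a bit vector:

    // the server thread is first marked blocked on its (still empty) command
    // queue, so that clearing the GLOBAL bit cannot make it runnable too early;
    // it only runs once a client posts a command and clears DEV_QUEUE
    thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE );                     // wait for work
    thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );  // allow scheduling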
trunk/kernel/devices/dev_ioc.c
r279 → r296

     // start server thread
+    thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE );
     thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
…
     dev_pic_disable_irq( lid , ioc_xp );

-    ioc_dmsg("\n[INFO] %s : coucou 3\n",
-             __FUNCTION__ );
-
     // call driver function
     cmd( XPTR( local_cxy , this ) );
trunk/kernel/devices/dev_nic.c
r279 → r296

     // start server thread
+    thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE );
     thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
…
     // block on THREAD_BLOCKED_IO condition and deschedule
     thread_block( thread_ptr , THREAD_BLOCKED_IO );
-    sched_yield( );
+    sched_yield( NULL );

     // disable NIC-RX IRQ
…
     // block on THREAD_BLOCKED I/O condition and deschedule
     thread_block( thread_ptr , THREAD_BLOCKED_IO );
-    sched_yield( );
+    sched_yield( NULL );

     // disable NIC-TX IRQ
trunk/kernel/devices/dev_txt.c
r279 → r296

     // start server thread
+    thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE );
     thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
 }
trunk/kernel/kern/chdev.c
r23 → r296

     // client thread goes to blocked state and deschedule
     thread_block( thread_ptr , THREAD_BLOCKED_IO );
-    sched_yield( );
+    sched_yield( NULL );

 } // end chdev_register_command()
…
     // block and deschedule
     thread_block( server , THREAD_BLOCKED_DEV_QUEUE );
-    sched_yield( );
+    sched_yield( NULL );
 }
 else
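The client side of the same protocol is the recurring block-then-yield idiom seen
here and throughout the changeset; since sched_switch_to() is removed (see
scheduler.c below), voluntary descheduling now always passes NULL so that
sched_yield() runs the election. A sketch of the idiom, not new code:

    // mark the calling thread blocked, then yield; with next == NULL the
    // scheduler calls sched_select() to elect the next runnable thread,
    // and the server thread unblocks us when the I/O completes
    thread_block( thread_ptr , THREAD_BLOCKED_IO );   // self-block on the I/O condition
    sched_yield( NULL );                              // deschedule; sched_select() picks next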
trunk/kernel/kern/cluster.c
r279 → r296

            __FUNCTION__ , local_cxy , hal_get_cycles() );

-    // initialises all cores descriptors
+    // initialises all cores descriptors
     for( lid = 0 ; lid < cluster->cores_nr; lid++ )
     {

(whitespace-only change: trailing blank removed from the comment line)
trunk/kernel/kern/core.c
r279 → r296

     // handle scheduler TODO improve the scheduling condition ... AG
-    if( (ticks % 10) == 0 ) sched_yield( );
+    if( (ticks % 10) == 0 ) sched_yield( NULL );

     // update DQDT TODO This update should depend on the cluster identifier,
trunk/kernel/kern/kernel_init.c
r285 → r296

 #include <hal_context.h>
 #include <hal_irqmask.h>
+#include <hal_ppm.h>
 #include <barrier.h>
 #include <remote_barrier.h>
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 0 at cycle %d : TXT0 initialized\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
+               __FUNCTION__, hal_time_stamp() );
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 1 at cycle %d : clusters initialised\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 1 : clusters initialised / cycle %d\n",
+               __FUNCTION__, hal_time_stamp() );
…
     /////////////////////////////////////////////////////////////////////////////////
     // STEP 2 : all CP0s initialize the process_zero descriptor.
-    //          CP0 in cluster 0 initialises the IOPIC device.
+    //          CP0 in cluster 0 initializes the IOPIC device.
     /////////////////////////////////////////////////////////////////////////////////
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 2 at cycle %d : PIC initialised\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 2 : PIC initialised / cycle %d\n",
+               __FUNCTION__, hal_time_stamp() );
…
     ////////////////////////////////////////////////////////////////////////////////
-    // STEP 3 : all CP0s complete the distibuted LAPIC initialization.
-    //          all CP0s initialize the irinternal chdev descriptors
-    //          all CP0s initialize the irlocal external chdev descriptors
+    // STEP 3 : all CP0s initialize the distibuted LAPIC descriptor.
+    //          all CP0s initialize the internal chdev descriptors
+    //          all CP0s initialize the local external chdev descriptors
     ////////////////////////////////////////////////////////////////////////////////
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 3 at cycle %d : all chdev initialised\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
+               __FUNCTION__, hal_time_stamp());
…
     /////////////////////////////////////////////////////////////////////////////////
     // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
+    //          All cores initialise specific core registers
     //          Alh cores initialize IDLE thread.
     //          Only CP0 in cluster 0 creates the VFS root inode.
…
     // All cores enable the shared IPI channel
-
-    // @@@
-    hal_set_ebase( 0x1000 );
-    // @@@
-
     dev_pic_enable_ipi();
     hal_enable_irq( &status );

+    // All cores initialize specific core registers
+    hal_core_init( info );
+
     kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n",
                __FUNCTION__ , local_cxy , core_lid , hal_get_sr() );

-    // all cores create the idle thread descriptor
+    // all cores initialize the idle thread descriptor
     error = thread_kernel_init( thread,
                                 THREAD_IDLE,
…
     }

-    // all cores register idle thread in scheduler
+    // all cores unblock idle thread, and register it in scheduler
+    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
     core->scheduler.idle = thread;

-    // all core activate the idle thread
-    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
-
     if( (core_lid == 0) && (local_cxy == 0) )
     {
-        kinit_dmsg("\n[INFO] %s : created idle thread %x at cycle %d\n",
-                   __FUNCTION__ , thread , (uint32_t)hal_time_stamp());
-    }
+        kinit_dmsg("\n[INFO] %s : initialized idle thread %x on core[%x,%d] / cycle %d\n",
+                   __FUNCTION__ , thread->trdid , local_cxy, core_lid, (uint32_t)hal_time_stamp());
+    }
+
+#if CONFIG_KINIT_DEBUG
+    sched_display();
+#endif

     // CPO in cluster 0 creates the VFS root
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 4 at cycle %d : VFS OK in cluster 0\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
+               __FUNCTION__, vfs_root_inode_xp , hal_time_stamp());
…
     fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();

-    assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
-            "cannot create FATFS context\n" );
+    assert( (fatfs_ctx != NULL) , __FUNCTION__ , "cannot create FATFS context\n" );

     // get local pointer on VFS context for FATFS
…
     // get extended pointer on VFS root inode from cluster 0
-    vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , process_zero.vfs_root_xp ) );
+    vfs_root_inode_xp = hal_remote_lwd( XPTR( 0 , &process_zero.vfs_root_xp ) );

     // update local process_zero descriptor
…
-    // if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
-
+    if( (core_lid == 0) && (local_cxy == io_cxy) )
+    kinit_dmsg("\n[INFO] %s : exit barrier 5 : VFS_root = %l in cluster IO / cycle %d\n",
+               __FUNCTION__, vfs_root_inode_xp , hal_time_stamp() );
…
     }

-    printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy );
-
     /////////////////////////////////////////////////////////////////////////////////
     if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
…
-    // if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    if( (core_lid == 0) && (local_cxy == io_cxy) )
+    kinit_dmsg("\n[INFO] %s : exit barrier 6 : dev_root = %l in cluster IO / cycle %d\n",
+               __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 7 at cycle %d : DEVFS OK in all clusters\n",
-               __FUNCTION__, (uint32_t)hal_time_stamp());
+    kinit_dmsg("\n[INFO] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
+               __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );

 #if CONFIG_KINIT_DEBUG
…
     if( (core_lid == 0) && (local_cxy == 0) )
-    kinit_dmsg("\n[INFO] %s exit barrier 8 at cycle %d : process init created\n",
-               __FUNCTION__ , (uint32_t)hal_time_stamp() );
+    kinit_dmsg("\n[INFO] %s : exit barrier 8 : process init created / cycle %d\n",
+               __FUNCTION__ , hal_time_stamp() );
…
     dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );

-    if( (core_lid == 0) && (local_cxy == io_cxy) )
-    thread_dmsg("\n[INFO] %s complete kernel init in cluster 0 at cycle %d\n"
-                __FUNCTION__ , (uint32_t)hal_time_stamp() )
-
     // each core jump to idle thread
     thread_idle_func();
trunk/kernel/kern/printk.c
r279 → r296

 }

+////////////////////////////////////////
+void nolock_printk( char * format , ...)
+{
+    va_list args;
+
+    // call kernel_printf on TXT0, in busy waiting mode
+    va_start( args , format );
+    kernel_printf( 0 , 1 , format , &args );
+    va_end( args );
+}
+
 ///////////////////////////////////////////
 inline void assert( bool_t condition,
trunk/kernel/kern/printk.h
r279 → r296

  * This function displays a formated string on the kernel terminal TXT0,
  * using a busy waiting policy: It calls directly the relevant TXT driver,
- * after taking the lock for exclusive access to the TXT0 terminal.
+ * after taking the TXT0 lock.
  **********************************************************************************
  * @ format     : formated string.
  *********************************************************************************/
 void printk( char* format, ... );

+/**********************************************************************************
+ * This function displays a formated string on the kernel terminal TXT0,
+ * using a busy waiting policy: It calls directly the relevant TXT driver,
+ * without taking the TXT0 lock.
+ **********************************************************************************
+ * @ format     : formated string.
+ *********************************************************************************/
+void nolock_printk( char* format, ... );
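nolock_printk() exists for callers that already hold the TXT0 lock and must emit
several lines atomically; sched_display() in scheduler.c below is the in-tree user.
A condensed sketch of that pattern (identifiers taken from the sched_display() hunk):

    uint32_t  save_sr;
    xptr_t    txt0_xp  = chdev_dir.txt[0];                      // TXT0 chdev (extended pointer)
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    xptr_t    lock_xp  = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    remote_spinlock_lock_busy( lock_xp , &save_sr );            // take TXT0 lock once
    nolock_printk("header line\n");                             // no locking inside
    nolock_printk("detail line\n");                             // output stays contiguous
    remote_spinlock_unlock_busy( lock_xp , save_sr );           // release TXT0 lock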
trunk/kernel/kern/rpc.c
r279 → r296

The same instrumentation change is applied uniformly to every RPC marshaling
client/server pair in this file (pmem_get_pages, process_pid_alloc, process_exec,
process_kill, thread_user_create, thread_kernel_create, signal_rise, the vfs_inode /
vfs_dentry / vfs_file services, vfs_inode_load, fatfs_get_cluster, vmm_get_vseg,
vmm_get_pte, kcm_alloc, kcm_free, mapper_move_buffer), plus a few whitespace-only
hunks. In each *_client function, the entry trace moves before the assert, and both
traces gain a cycle stamp:

-    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
-
-    rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
+
+    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
…
-    rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
+    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );

In each *_server function, matching traces are added at entry and before return:

+    rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
…
+    rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() );

The remaining hunks are specific to the RPC infrastructure. In rpc_send_sync():

     thread_t * this = CURRENT_THREAD;

-    rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n",
-             __FUNCTION__ , local_cxy , server_cxy );
+    rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x / cycle %d\n",
+             __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );

     // allocate and initialise an extended pointer on the RPC descriptor
…
         printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
                __FUNCTION__ , local_cxy , server_cxy );

-        if( thread_can_yield() ) sched_yield();
-    }
-    else
-    {
+        if( thread_can_yield() ) sched_yield( NULL );
     }
     }
     while( error );

-    rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x\n",
-             __FUNCTION__ , local_cxy , server_cxy , first );
+    rpc_dmsg("\n[INFO] %s : RPC %l registered / server_cxy = %x / cycle %d\n",
+             __FUNCTION__ , desc_xp , server_cxy , hal_time_stamp() );
…
-    rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n",
-             __FUNCTION__ , local_cxy , server_cxy );
+    rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x / cycle %d\n",
+             __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );

 } // end rpc_send_sync()

In the function draining the RPC fifo (up to CONFIG_RPC_PENDING_MAX requests):

-    this = CURRENT_THREAD;
-    core = this->core;
-
+    this  = CURRENT_THREAD;
+    core  = this->core;
+    count = 0;
+
+    rpc_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / fifo = %x / cycle %d\n",
+             __FUNCTION__, this->trdid, local_cxy, core->lid , hal_time_stamp() );
+
     // handle up to CONFIG_RPC_PENDING_MAX requests before exit
-    count = 0;
     do
     {
-        error = local_fifo_get_item( &rpc_fifo->fifo,
-                                     (uint64_t *)&xp );
+        error = local_fifo_get_item( &rpc_fifo->fifo, (uint64_t *)&xp );

         if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
         {
-            rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n",
-                     __FUNCTION__ , this->trdid , core->lid , local_cxy , count );
-
             // get client cluster identifier and pointer on RPC descriptor
…
             // get rpc index from RPC descriptor
             index = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
+
+            rpc_dmsg("\n[INFO] %s : thread %x on core [%x,%d] / index = %d / &rpc = %x\n",
+                     __FUNCTION__ , this->trdid , core->lid , local_cxy , index , rpc_server[index] );

             // call the relevant server function

In the RPC-thread activation path (which now hands off directly with sched_yield):

     if( found )                    // activate this idle RPC thread
     {
+        // unblock it
         thread->blocked = 0;

-        rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n",
-                 __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
+        rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core [%x,%d] / cycle %d\n",
+                 __FUNCTION__ , thread , core->gid , local_cxy , hal_time_stamp() );
     }
     else                           // create a new RPC thread
     {
+        // create new thread
         error = thread_kernel_create( &thread,
                                       THREAD_RPC,
…
         }

-        rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n",
-                 __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
+        // unblock new thread
+        thread->blocked = 0;

         // update core descriptor counter
         hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
+
+        rpc_dmsg("\n[INFO] %s : create RPC thread %x on core [%x,%d] / cycle %d\n",
+                 __FUNCTION__ , thread->trdid, local_cxy, core->lid, hal_time_stamp() );
     }
…
     rpc_fifo->owner = thread->trdid;

-    // current thread deschedules / RPC thread start execution
-    sched_switch_to( thread );
+    // current thread switch to RPC thread
+    sched_yield( thread );

     // restore IRQs for the calling thread

In the fifo-checking entry point (see rpc.h), traces are added on all exit paths:

     error_t error;

+    rpc_dmsg("\n[INFO] %s : enter in cluster %x\n",
+             __FUNCTION__ , local_cxy );
+
     // calling thread does nothing if light lock already taken or FIFO empty
     if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) )
     {
+        rpc_dmsg("\n[INFO] %s : exit but do nothing in cluster %x\n",
+                 __FUNCTION__ , local_cxy );
+
         return false;
     }
…
     }

+    rpc_dmsg("\n[INFO] %s : exit after activating an RPC thread in cluster %x\n",
+             __FUNCTION__ , local_cxy );
+
     return true;
     }
     else   // light lock taken by another thread
     {
+        rpc_dmsg("\n[INFO] %s : exit but do nothing in cluster %x\n",
+                 __FUNCTION__ , local_cxy );
+
         return false;
     }

In rpc_thread_func(), the idle RPC thread now blocks explicitly before yielding:

 void rpc_thread_func()
 {
-    // makes the calling thread not preemptable
+    // makes the RPC thread not preemptable
     hal_disable_irq( NULL );
…
     rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

-    rpc_dmsg("\n[INFO] RPC thread %x created on core %d in cluster %x at cycle %d\n",
-             this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
+    rpc_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
+             __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );

     // this infinite loop is not preemptable
…
     if( this->trdid != rpc_fifo->owner )
     {
-        printk("\n[PANIC] in %s : RPC_THREAD %x not owner of RPC_FIFO in cluster %x\n",
-               __FUNCTION__ , this->trdid , local_cxy );
+        printk("\n[PANIC] in %s : thread %x on core[%x,%d] not owner of RPC_FIFO\n",
+               __FUNCTION__, this->trdid, local_cxy, this->core->lid );
         hal_core_sleep();
     }
…
     if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
     {
-        rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n",
-                 this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
+        rpc_dmsg("\n[INFO] thread %x on core[%x,%d] suicide / cycle %d\n",
+                 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );

         // update core descriptor counter
…
     else
     {
-        rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n",
-                 this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
-
-        sched_yield();
-
-        rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
-                 this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
+        rpc_dmsg("\n[INFO] %s : thread %x on core[%x,%d] blocks / cycle %d\n",
+                 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
+
+        thread_block( this , THREAD_BLOCKED_IDLE );
+        sched_yield( NULL );
+
+        rpc_dmsg("\n[INFO] RPC thread %x wake up on core[%x,%d] / cycle %d\n",
+                 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
     }
 } // end while
trunk/kernel/kern/rpc.h
r279 → r296

 /***********************************************************************************
  * This function is the entry point for RPC handling on the server side.
- * It can be executed by any thread running (in kernel mode) on any core.
+ * It is executed by a core receiving an IPI.
  * It checks the RPC fifo, try to take the light-lock and activates (or creates)
  * an RPC thread in case of success.
trunk/kernel/kern/scheduler.c
r279 → r296

 #include <core.h>
 #include <thread.h>
+#include <chdev.h>
 #include <scheduler.h>
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//         Extern global variables
+///////////////////////////////////////////////////////////////////////////////////////////
+
+extern chdev_directory_t    chdev_dir;  // allocated in kernel_init.c file
…
 thread_t * sched_select( core_t * core )
 {
-    thread_t * thread;
+    thread_t    * thread;

     scheduler_t * sched = &core->scheduler;
+
+    sched_dmsg("\n[INFO] %s : enter core[%x,%d] / cycle %d\n",
+               __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );

     // take lock protecting sheduler lists
…
     list_entry_t * last;

-    // first : scan the kernel threads list,
-    // only if this list is not empty
+    // first : scan the kernel threads list if not empty
     if( list_is_empty( &sched->k_root ) == false )
     {
…
         thread = LIST_ELEMENT( current , thread_t , sched_list );

-        // return thread if runnable
-        if( thread->blocked == 0 )
+        // return thread if not idle_thread and runnable
+        if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
         {
             // release lock
             spinlock_unlock( &sched->lock );
+
+            sched_dmsg("\n[INFO] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
+                       __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
+
             return thread;
         }
…
     }

-    // second : scan the user threads list,
-    // only if this list is not empty
+    // second : scan the user threads list if not empty
     if( list_is_empty( &sched->u_root ) == false )
     {
…
             // release lock
             spinlock_unlock( &sched->lock );
+
+            sched_dmsg("\n[INFO] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
+                       __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
             return thread;
         }
…
     spinlock_unlock( &sched->lock );

+    sched_dmsg("\n[INFO] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
+               __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
+
     // third : return idle thread if no runnable thread
     return sched->idle;

-} // end sched_elect()
+} // end sched_select()
…
     list_entry_t * iter;
     thread_t     * thread;
-
     scheduler_t  * sched = &core->scheduler;
+
+    sched_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d]\n",
+               __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );

     // take lock protecting threads lists
…
     spinlock_unlock( &sched->lock );

+    sched_dmsg("\n[INFO] %s : exit / thread %x on core[%x,%d]\n",
+               __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
+
 } // end sched_handle_signals()

-//////////////////
-void sched_yield( )
+///////////////////////////////////
+void sched_yield( thread_t * next )
 {
     reg_t sr_save;
-    thread_t * next;

     thread_t    * current = CURRENT_THREAD;
…
     scheduler_t * sched   = &core->scheduler;

-    if( thread_can_yield() == false )
-    {
-        printk("\n[PANIC] in %s : thread %x for process %x on core_gid %x"
-               " has not released all locks at cycle %d\n",
-               __FUNCTION__, current->trdid, current->process->pid,
-               local_cxy , core->lid , hal_get_cycles() );
-        hal_core_sleep();
-    }
-
-    // desactivate IRQs
-    hal_disable_irq( &sr_save );
+    sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] enter / cycle %d\n",
+               __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+    // check calling thread released all locks
+    assert( (thread_can_yield() == true), __FUNCTION__, "locks not released\n");

     // first loop on all threads to handle pending signals
     sched_handle_signals( core );

-    // second loop on threads to select next thread
-    next = sched_select( core );
-
-    // check stack overflow for selected thread
-    if( next->signature != THREAD_SIGNATURE )
-    {
-        printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
-               " on core [%x][%d]\n",
-               __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
-        hal_core_sleep();
-    }
-
-    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
-               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
-
-    // switch contexts and update scheduler state if new thread
-    if( next != current )
-    {
-        hal_cpu_context_save( current );
-        hal_cpu_context_restore( next );
-
+    // second loop on threads to select next thread if required
+    if( next == NULL ) next = sched_select( core );
+
+    // check next thread attached to same core as the calling thread
+    assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
+
+    // check next thread not blocked
+    assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
+
+    // switch contexts and update scheduler state if next != current
+    if( next != current )
+    {
+        sched_dmsg("\n[INFO] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
+                   __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
+                   next->trdid, thread_type_str(next->type), hal_time_stamp() );
+
+        // calling thread desactivate IRQs
+        hal_disable_irq( &sr_save );
+
+        // update scheduler
         if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
         else                               sched->k_last = &current->sched_list;
         sched->current = next;
-    }
-
-    // restore IRQs
-    hal_restore_irq( sr_save );
-
-    if( current->type != THREAD_USER ) return;
-
-    if( next == core->fpu_owner )  hal_fpu_enable();
-    else                           hal_fpu_disable();
-
+
+        // handle FPU
+        if( next->type == THREAD_USER ) return;
+        {
+            if( next == core->fpu_owner )  hal_fpu_enable();
+            else                           hal_fpu_disable();
+        }
+
+        // switch contexts
+        hal_cpu_context_save( current->cpu_context );
+        hal_cpu_context_restore( next->cpu_context );
+
+        // restore IRQs when calling thread resume
+        hal_restore_irq( sr_save );
+
+        sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] / cycle %d\n",
+                   __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+    }
+    else
+    {
+        sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] continue / cycle %d\n",
+                   __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+    }
 } // end sched_yield()

The whole sched_switch_to( thread_t * new ) function is removed (its four panic
checks — locks released, same core, not blocked, stack-overflow signature — and
its context-switch sequence are subsumed by sched_yield( next )), and a new
sched_display() function is added:

+////////////////////
+void sched_display()
+{
+    list_entry_t * iter;
+    thread_t     * thread;
+    uint32_t       save_sr;
+
+    thread_t     * current = CURRENT_THREAD;
+    core_t       * core    = current->core;
+    scheduler_t  * sched   = &core->scheduler;
+
+    // get pointers on TXT0 chdev
+    xptr_t    txt0_xp  = chdev_dir.txt[0];
+    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on remote TXT0 chdev lock
+    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+    // get TXT0 lock in busy waiting mode
+    remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+    nolock_printk("\n********** scheduler state for core[%x,%d] **********************\n"
+                  "kernel_threads = %d / user_threads = %d / current = %x\n",
+                  local_cxy , core->lid,
+                  sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
+
+    // display kernel threads
+    LIST_FOREACH( &sched->k_root , iter )
+    {
+        thread = LIST_ELEMENT( iter , thread_t , sched_list );
+        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+                      thread_type_str( thread->type ), thread->trdid, thread->process->pid,
+                      thread->entry_func, thread->blocked );
+    }
+
+    // display user threads
+    LIST_FOREACH( &sched->u_root , iter )
+    {
+        thread = LIST_ELEMENT( iter , thread_t , sched_list );
+        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+                      thread_type_str( thread->type ), thread->trdid, thread->process->pid,
+                      thread->entry_func, thread->blocked );
+    }
+
+    // release TXT0 lock
+    remote_spinlock_unlock_busy( lock_xp , save_sr );
+
+} // end sched_display()
trunk/kernel/kern/scheduler.h
r279 → r296

 /*********************************************************************************************
  * This structure define the scheduler associated to a given core.
- * WARNING : the idle thread is executed when there is no runable thread in the list
- *           of attached threads, but is NOT part of the list of attached threads.
  ********************************************************************************************/
…
  * This function handles pending signals for all registered threads, and tries to make
  * a context switch for the core running the calling thread.
- * - If there is a runable thread (other than the current thread or the idle thread),
- *   the calling thread is descheduled, but its state is not modified.
+ * - If the <next> argument is not NULL, this next thread starts execution.
+ * - If <next> is NULL, it calls the sched_select() function. If there is a runable thread
+ *   (other than current thread or idle thread), this selected thread starts execution.
  * - If there is no other runable thread, the calling thread continues execution.
  * - If there is no runable thread, the idle thread is executed.
+ *********************************************************************************************
+ * @ next  : local pointer on next thread to run / call sched_select() if NULL.
  ********************************************************************************************/
-void sched_yield();
-
-/*********************************************************************************************
- * This function handles pending signals for all registered threads, and make
- * a context switch to the thread defined by the <thread> argument.
- * If the selected thread is not attached to the same core as the calling thread,
- * or is blocked, it causes a kernel panic.
- *********************************************************************************************
- * @ new   : local pointer on the thread to run.
- ********************************************************************************************/
-void sched_switch_to( struct thread_s * new );
+void sched_yield( struct thread_s * next );
…
 /*********************************************************************************************
- * This function scan the list of kernel threads to find an idle (blocked) RPC thread.
- *********************************************************************************************
- * @ core     : local pointer on the core descriptor.
- * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
+ * This function display the internal state of the calling core scheduler.
  ********************************************************************************************/
-struct thread_s * sched_get_rpc_thead( struct core_s * core );
-
+void sched_display();
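With sched_switch_to() removed, the single sched_yield( next ) entry point covers
both descheduling styles. A sketch of the two call forms this changeset leaves in
the tree (`rpc_thread` below stands for the thread picked by the RPC activation
path in rpc.c):

    sched_yield( NULL );        // election: sched_select() picks the next runnable
                                // thread (all former sched_yield() call sites)

    sched_yield( rpc_thread );  // direct hand-off to a known thread (replaces
                                // sched_switch_to(); see rpc.c); the target must
                                // be attached to the same core and not blocked,
                                // otherwise sched_yield() asserts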
trunk/kernel/kern/thread.c
r286 r296

  char * thread_type_str( uint32_t type )
  {
-     if     ( type == THREAD_USER   ) return "USER";
+     if     ( type == THREAD_USER   ) return "USR";
      else if( type == THREAD_RPC    ) return "RPC";
      else if( type == THREAD_DEV    ) return "DEV";
-     else if( type == THREAD_KERNEL ) return "KERNEL";
-     else if( type == THREAD_IDLE   ) return "IDLE";
+     else if( type == THREAD_KERNEL ) return "KER";
+     else if( type == THREAD_IDLE   ) return "IDL";
      else                             return "undefined";
  }
  …
      return 0;
- }
+
+ } // end thread_init()
  …
      *new_thread = thread;
      return 0;
- }
+
+ } // end thread_user_create()
  …
      *new_thread = thread;
      return 0;
- }
+
+ } // end thread_user_fork()
  …
      thread_t * thread;    // pointer on new thread descriptor

-     thread_dmsg("\n[INFO] %s : enters for type %s in cluster %x\n",
-                 __FUNCTION__ , thread_type_str( type ) , local_cxy );
+     thread_dmsg("\n[INFO] %s : enter / for type %s on core[%x,%d] / cycle %d\n",
+                 __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() );

      assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
  …
      hal_cpu_context_create( thread );

-     thread_dmsg("\n[INFO] %s : exit in cluster %x / trdid = %x / core_lid = %d\n",
-                 __FUNCTION__ , local_cxy , thread->trdid , core_lid );
+     thread_dmsg("\n[INFO] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",
+                 __FUNCTION__ , thread->trdid , thread_type_str(type) ,
+                 local_cxy , core_lid , hal_time_stamp() );

      *new_thread = thread;
      return 0;
- }
+
+ } // end thread_kernel_create()
  …
      // deschedule
-     sched_yield();
+     sched_yield( NULL );
      return 0;
  }
  …
      // force scheduling
-     sched_yield();
+     sched_yield( NULL );
  }
  }
-
trunk/kernel/libk/remote_barrier.c
r104 r296

      // block & deschedule the calling thread
      thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC );
-     sched_yield();
+     sched_yield( NULL );

      // restore interrupts
-
trunk/kernel/libk/remote_condvar.c
r60 r296

      // block the calling thread
      thread_block( CURRENT_THREAD , THREAD_BLOCKED_USERSYNC );
-     sched_yield();
+     sched_yield( NULL );

      // lock the mutex before return
-
trunk/kernel/libk/remote_fifo.c
r279 r296

      // deschedule without blocking
-     if( thread_can_yield() ) sched_yield();
+     if( thread_can_yield() ) sched_yield( NULL );

      // disable interrupts
-
trunk/kernel/libk/remote_mutex.c
r124 r296

      // block & deschedule the calling thread
      thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC );
-     sched_yield();
+     sched_yield( NULL );

      // restore interrupts
-
trunk/kernel/libk/remote_sem.c
r23 r296

          // block and deschedule
          thread_block( this , THREAD_BLOCKED_SEM );
-         sched_yield();
+         sched_yield( NULL );
      }
  } // end remote_sem_wait()
-
trunk/kernel/libk/remote_spinlock.c
r101 r296

      {
          hal_restore_irq( mode );
-         if( thread_can_yield() ) sched_yield();
+         if( thread_can_yield() ) sched_yield( NULL );
          hal_disable_irq( &mode );
          continue;
-
trunk/kernel/libk/spinlock.c
r60 r296

      {
          hal_restore_irq( mode );
-         if( thread_can_yield() ) sched_yield();
+         if( thread_can_yield() ) sched_yield( NULL );
          hal_disable_irq( &mode );
          continue;
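The identical one-line change in remote_spinlock.c and spinlock.c sits inside the kernel's polite busy-wait loop: while the lock stays taken, the spinning thread briefly re-enables IRQs, offers the core to the scheduler when descheduling is legal, then masks IRQs again before retrying. A condensed sketch of that loop, assuming lock_is_taken() as a hypothetical stand-in for the actual atomic test:

    uint32_t mode;                          // saved IRQ state

    hal_disable_irq( &mode );               // spin with IRQs masked
    while( lock_is_taken( lock ) )          // hypothetical atomic predicate
    {
        hal_restore_irq( mode );            // let pending IRQs in between attempts
        if( thread_can_yield() ) sched_yield( NULL );   // r296 form : NULL => sched_select()
        hal_disable_irq( &mode );           // mask IRQs again before retry
    }
    // here the lock is held and IRQs are still masked
-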
trunk/kernel/mm/mapper.c
r279 r296

          // deschedule
-         sched_yield();
+         sched_yield( NULL );
      }
  }
-
trunk/kernel/mm/page.c
r238 r296

          // deschedule the calling thread
          thread_block( thread , THREAD_BLOCKED_PAGE );
-         sched_yield();
+         sched_yield( NULL );
      }
      else   // page is not locked
-
trunk/kernel/syscalls/sys_thread_exit.c
r60 r296

          // deschedule
-         sched_yield();
+         sched_yield( NULL );
      }
  }
-
trunk/kernel/syscalls/sys_thread_join.c
r23 r296

          // deschedule
-         sched_yield();
+         sched_yield( NULL );
      }
  }
-
trunk/kernel/syscalls/sys_thread_sleep.c
r101 r296

  int sys_thread_sleep()
  {
+     thread_t * this = CURRENT_THREAD;
+
      thread_dmsg("\n[INFO] %s : thread %x in process %x goes to sleep at cycle %d\n",
-                 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_get_cycles() );
+                 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() );

-     thread_block( CURRENT_THREAD, THREAD_BLOCKED_GLOBAL );
-     sched_yield();
+     thread_block( this , THREAD_BLOCKED_GLOBAL );
+     sched_yield( NULL );

      thread_dmsg("\n[INFO] %s : thread %x in process %x resume at cycle\n",
-                 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_PROCESS->pid, hal_get_cycles() );
+                 __FUNCTION__, this->trdid, this->process->pid, hal_get_cycles() );

      return 0;
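sys_thread_sleep() only blocks the calling thread on THREAD_BLOCKED_GLOBAL; the matching wakeup must come from another thread. A sketch of that wakeup path, assuming the kernel's thread_unblock() primitive, which takes an extended pointer on the target thread and the blocking cause to clear (the helper name and its arguments are illustrative):

    // hypothetical wakeup path (not part of this changeset) : any thread,
    // on any core and in any cluster, clears the GLOBAL blocking bit of
    // the sleeping thread identified by <cxy> and <ptr>
    void example_thread_wakeup( cxy_t cxy , thread_t * ptr )
    {
        thread_unblock( XPTR( cxy , ptr ) , THREAD_BLOCKED_GLOBAL );
    }
-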
trunk/kernel/syscalls/sys_thread_yield.c
r23 r296

  int sys_thread_yield()
  {
-     sched_yield();
+     sched_yield( NULL );
      return 0;
  }
-
trunk/kernel/vfs/vfs.c
r279 r296

      error_t error;

-     vfs_dmsg("\n[INFO] %s : enter / local_cluster = %x / parent_cluster = %x\n",
-              __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
+     vfs_dmsg("\n[INFO] %s : enter / local_cxy = %x / parent_xp = %l\n",
+              __FUNCTION__ , local_cxy , dentry_xp );

      // check fs type and get pointer on context
  …
      remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );

-     vfs_dmsg("\n[INFO] %s : enter / local_cluster = %x / parent_cluster = %x\n",
-              __FUNCTION__ , local_cxy , GET_CXY( dentry_xp ) );
+     vfs_dmsg("\n[INFO] %s : exit / child_xp = %l / parent_xp = %l\n",
+              __FUNCTION__ , XPTR( local_cxy , inode ) , dentry_xp );

      // return extended pointer on inode
  …
      kmem_req_t req;   // request to kernel memory allocator

+     vfs_dmsg("\n[INFO] %s : enter for %s / parent inode = %x / cycle = %d\n",
+              __FUNCTION__ , name , parent , hal_time_stamp() );
+
      // get pointer on context
      if ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
  …
      // return extended pointer on dentry
      *dentry_xp = XPTR( local_cxy , dentry );
+
+     vfs_dmsg("\n[INFO] %s : exit for %s / cycle = %d\n",
+              __FUNCTION__ , name , hal_time_stamp() );

      return 0;
  …
      parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );

-     vfs_dmsg("\n[INFO] %s : enter in cluster %x / child_cxy = %x / parent_cxy = %x\n",
-              __FUNCTION__ , local_cxy , child_cxy , parent_cxy );
+     vfs_dmsg("\n[INFO] %s : enter in cluster %x for %s / child_cxy = %x / parent_xp = %l\n",
+              __FUNCTION__ , local_cxy , name , child_cxy , parent_xp );

      // 1. create dentry
  …
      hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );

+     vfs_dmsg("\n[INFO] %s : exit in cluster %x for %s\n",
+              __FUNCTION__ , local_cxy , name );
+
      // success : return extended pointer on child inode
      *child_xp = inode_xp;
-
trunk/kernel/vfs/vfs.h
r266 r296

  /******************************************************************************************
   * This function creates a new couple dentry/inode, and insert it in the Inode-Tree.
-  * It can be executed by any thread running in any cluster, as this function
-  * uses the rpc_dentry_create_client() and rpc_inode_create_client() if required.
-  * This is done in three steps:
+  * It can be executed by any thread running in any cluster (can be different from both
+  * the child cluster and the parent cluster), as it uses the rpc_dentry_create_client()
+  * and rpc_inode_create_client() if required. This is done in three steps:
   * 1) The dentry is created in the cluster containing the existing <parent_xp> inode.
   *    The new dentry name is defined by the <name> argument.
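A schematic sketch of the local-versus-remote dispatch that the comment describes for step 1. Only rpc_dentry_create_client() is named in the comment above; vfs_dentry_create() and both argument lists are assumptions for illustration:

    // step 1, schematic : create the dentry in the cluster that holds
    // the parent inode identified by <parent_xp>
    cxy_t parent_cxy = GET_CXY( parent_xp );

    if( parent_cxy == local_cxy )      // parent inode is local
    {
        // hypothetical direct call, argument list illustrative
        error = vfs_dentry_create( fs_type , name , parent_ptr , &dentry_xp );
    }
    else                               // parent inode is remote
    {
        // RPC stub named in the comment above, arguments illustrative
        rpc_dentry_create_client( parent_cxy , fs_type , name , parent_ptr ,
                                  &dentry_xp , &error );
    }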