Changeset 279 for trunk/kernel/kern
- Timestamp: Jul 27, 2017, 12:23:29 AM
- Location: trunk/kernel/kern
- Files: 14 edited
trunk/kernel/kern/chdev.h (r249 → r279)

    xptr_t  nic_tx[CONFIG_MAX_NIC_CHANNELS];   // external / multi-channels / shared

-   xptr_t  icu[CONFIG_MAX_CLUSTERS];          // internal / single channel / shared
    xptr_t  mmc[CONFIG_MAX_CLUSTERS];          // internal / single channel / shared
trunk/kernel/kern/cluster.c (r124 → r279)

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;
-   cluster->cores_in_kernel = info->cores_nr;   // all cpus start in kernel mode
+   cluster->cores_in_kernel = 0;

    // initialize the lock protecting the embedded kcm allocator
…
    // initialises RPC fifo
    rpc_fifo_init( &cluster->rpc_fifo );
+   cluster->rpc_threads = 0;

    cluster_dmsg("\n[INFO] %s : RPC fifo inialized in cluster %x at cycle %d\n",
trunk/kernel/kern/cluster.h (r188 → r279)

     * This structure defines a cluster manager.
     * It contains both global platform information, and cluster specific resources
-    * managed by the local kernel instance.
+    * controled by the local kernel instance.
     ******************************************************************************************/
…
    // global parameters
-
    uint32_t        paddr_width;       /*! numer of bits in physical address            */
    uint32_t        x_width;           /*! number of bits to code x_size (can be 0)     */
…
    // local parameters
-
    uint32_t        cores_nr;          /*! number of cores in cluster                   */
    uint32_t        cores_in_kernel;   /*! number of cores currently in kernel mode     */

+   uint32_t        ram_size;          /*! physical memory size                         */
+   uint32_t        ram_base;          /*! physical memory base (local address)         */

    core_t          core_tbl[CONFIG_MAX_LOCAL_CORES];   /*! embedded cores              */

+   list_entry_t    dev_root;          /*! root of list of devices in cluster           */
+
+   // memory allocators
    ppm_t           ppm;               /*! embedded kernel page manager                 */
    khm_t           khm;               /*! embedded kernel heap manager                 */
    kcm_t           kcm;               /*! embedded kernel cache manager (for KCMs)     */
-
    kcm_t         * kcm_tbl[KMEM_TYPES_NR];              /*! pointers on allocated KCMs */

-   uint32_t        ram_size;          /*! physical memory size                         */
-   uint32_t        ram_base;          /*! physical memory base (local address)         */
-
-   rpc_fifo_t      rpc_fifo;          /*! cluster RPC fifo (shared)                    */
-   list_entry_t    devlist;           /*! root of list of devices in cluster           */
-
+   // RPC
+   rpc_fifo_t      rpc_fifo;          /*! RPC fifo                                     */
+   uint32_t        rpc_threads;       /*! current number of RPC threads                */
+
+   // DQDT
    int32_t         pages_var;         /*! pages number increment from last DQQT update */
    int32_t         threads_var;       /*! threads number increment from last DQDT update */
…
    dqdt_node_t     dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];     /*! embedded DQDT nodes        */

+   // Local process manager
    pmgr_t          pmgr;              /*! embedded process manager                     */
trunk/kernel/kern/core.c (r188 → r279)

    core->usage         = 0;
    core->spurious_irqs = 0;
-   core->rpc_threads   = 0;
    core->thread_idle   = NULL;
    core->fpu_owner     = NULL;
trunk/kernel/kern/core.h (r188 → r279)

    uint32_t          usage;          /*! cumulated busy_percent (idle / total)        */
    uint32_t          spurious_irqs;  /*! for instrumentation...                       */
-   uint32_t          rpc_threads;    /*! current RPC threads number for this core     */
-   struct thread_s * thread_rpc;     /*! pointer on current RPC thread descriptor     */
    struct thread_s * thread_idle;    /*! pointer on idle thread descriptor            */
    struct thread_s * fpu_owner;      /*! pointer on current FPU owner thread          */
trunk/kernel/kern/kernel_init.c (r265 → r279)

    #include <hal_special.h>
    #include <hal_context.h>
+   #include <hal_irqmask.h>
    #include <barrier.h>
    #include <remote_barrier.h>
…
    ///////////////////////////////////////////////////////////////////////////////////////////
-   // All the seglobal variables are replicated in all clusters.
+   // All the following global variables are replicated in all clusters.
    // They are initialised by the kernel_init() function.
    //
…
        " /_/ \\_\\ |______| |_| |_| \\_____/ |______/ |_| |_| |_| \\_\\ |_| |_| \n"
        "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n"
-       "\n\n\t\t\t Version 0.0 : %d clusters / %d cores per cluster\n\n", nclusters , ncores );
+       "\n\n\t\t\t Version 0.0 : %d cluster(s) / %d core(s) per cluster\n\n", nclusters , ncores );
    }
…
    }

-   if( local_cxy == 0 )
-   kinit_dmsg("\n[INFO] %s created MMC chdev in cluster 0 at cycle %d\n",
-              __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() );
+   kinit_dmsg("\n[INFO] %s created MMC in cluster %x / chdev = %x\n",
+              __FUNCTION__ , channel , local_cxy , chdev_ptr );
    }
    ///////////////////////////////
…
        chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );

-       kinit_dmsg("\n[INFO] %s created DMA[%d] chdev in cluster 0 at cycle %d\n",
-                  __FUNCTION__ , channel , (uint32_t)hal_time_stamp() );
+       kinit_dmsg("\n[INFO] %s created DMA[%d] in cluster %x / chdev = %x\n",
+                  __FUNCTION__ , channel , local_cxy , chdev_ptr );
    }
    }
…
    }

-   kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x at cycle %d\n",
-              __FUNCTION__ , chdev_func_str( func ), channel,
-              local_cxy , (uint32_t)hal_time_stamp() );
+   kinit_dmsg("\n[INFO] %s create chdev %s[%d] in cluster %x / chdev = %x\n",
+              __FUNCTION__ , chdev_func_str( func ), channel , local_cxy , chdev );

    } // end if match
…
    }

+   ////////////////////////////////////////////////////////////////////////////////////////////
+   // This function display on TXT0 the content of the external chdev directory,
+   // in the local cluster.
+   ////////////////////////////////////////////////////////////////////////////////////////////
+   static void chdev_dir_display( )
+   {
+       cxy_t     iob_cxy  = GET_CXY( chdev_dir.iob );
+       chdev_t * iob_ptr  = (chdev_t *)GET_PTR( chdev_dir.iob );
+       xptr_t    iob_base = hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) );
+
+       cxy_t     pic_cxy  = GET_CXY( chdev_dir.pic );
+       chdev_t * pic_ptr  = (chdev_t *)GET_PTR( chdev_dir.pic );
+       xptr_t    pic_base = hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) );
+
+       cxy_t     txt0_cxy  = GET_CXY( chdev_dir.txt[0] );
+       chdev_t * txt0_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[0] );
+       xptr_t    txt0_base = hal_remote_lwd( XPTR( txt0_cxy , &txt0_ptr->base ) );
+
+       cxy_t     txt1_cxy  = GET_CXY( chdev_dir.txt[1] );
+       chdev_t * txt1_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[1] );
+       xptr_t    txt1_base = hal_remote_lwd( XPTR( txt1_cxy , &txt1_ptr->base ) );
+
+       cxy_t     txt2_cxy  = GET_CXY( chdev_dir.txt[2] );
+       chdev_t * txt2_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[2] );
+       xptr_t    txt2_base = hal_remote_lwd( XPTR( txt2_cxy , &txt2_ptr->base ) );
+
+       cxy_t     ioc_cxy  = GET_CXY( chdev_dir.ioc[0] );
+       chdev_t * ioc_ptr  = (chdev_t *)GET_PTR( chdev_dir.ioc[0] );
+       xptr_t    ioc_base = hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
+
+       cxy_t     fbf_cxy  = GET_CXY( chdev_dir.fbf[0] );
+       chdev_t * fbf_ptr  = (chdev_t *)GET_PTR( chdev_dir.fbf[0] );
+       xptr_t    fbf_base = hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) );
+
+       cxy_t     nic_rx_cxy  = GET_CXY( chdev_dir.nic_rx[0] );
+       chdev_t * nic_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] );
+       xptr_t    nic_rx_base = hal_remote_lwd( XPTR( nic_rx_cxy , &nic_rx_ptr->base ) );
+
+       cxy_t     nic_tx_cxy  = GET_CXY( chdev_dir.nic_tx[0] );
+       chdev_t * nic_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] );
+       xptr_t    nic_tx_base = hal_remote_lwd( XPTR( nic_tx_cxy , &nic_tx_ptr->base ) );
+
+       printk("\n*** external chdev directory in cluster %x\n"
+              " - iob       = %l / base = %l\n"
+              " - pic       = %l / base = %l\n"
+              " - txt[0]    = %l / base = %l\n"
+              " - txt[1]    = %l / base = %l\n"
+              " - txt[2]    = %l / base = %l\n"
+              " - ioc[0]    = %l / base = %l\n"
+              " - fbf[0]    = %l / base = %l\n"
+              " - nic_rx[0] = %l / base = %l\n"
+              " - nic_tx[0] = %l / base = %l\n",
+              local_cxy,
+              chdev_dir.iob, iob_base,
+              chdev_dir.pic, pic_base,
+              chdev_dir.txt[0], txt0_base,
+              chdev_dir.txt[1], txt1_base,
+              chdev_dir.txt[2], txt2_base,
+              chdev_dir.ioc[0], ioc_base,
+              chdev_dir.fbf[0], fbf_base,
+              chdev_dir.nic_rx[0], nic_rx_base,
+              chdev_dir.nic_tx[0], nic_tx_base );
+   }
+
    ///////////////////////////////////////////////////////////////////////////////////////////
    // This function is the entry point for the kernel initialisation.
…
    error_t      error;
+   uint32_t     status;              // running core status register

    cxy_t        io_cxy = info->io_cxy;
…
    if( error )
    {
-       nolock_printk("\n[PANIC] in %s : illegal core identifiers"
+       printk("\n[PANIC] in %s : illegal core identifiers"
               " gid = %x / cxy = %x / lid = %d\n",
               __FUNCTION__ , core_lid , core_cxy , core_lid );
…
    if( error )
    {
-       nolock_printk("\n[PANIC] in %s : cannot initialise cluster %x",
+       printk("\n[PANIC] in %s : cannot initialise cluster %x",
               __FUNCTION__ , local_cxy );
        hal_core_sleep();
…
    // STEP 2 : all CP0s initialize the process_zero descriptor.
    //          CP0 in cluster 0 initialises the IOPIC device.
-   //          all CP0s complete the distibuted LAPIC initialization.
    /////////////////////////////////////////////////////////////////////////////////
…
    if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info );

-   // all CP0s initialize their local LAPIC extension,
-   if( core_lid == 0 ) lapic_init( info );
-
    ////////////////////////////////////////////////////////////////////////////////
    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
…
    ////////////////////////////////////////////////////////////////////////////////
-   // STEP 3 : all CP0s initialize their local chdev descriptors
-   //          (both internal devices and external devices).
+   // STEP 3 : all CP0s complete the distibuted LAPIC initialization.
+   //          all CP0s initialize their internal chdev descriptors
+   //          all CP0s initialize their local external chdev descriptors
    ////////////////////////////////////////////////////////////////////////////////
+
+   // all CP0s initialize their local LAPIC extension,
+   if( core_lid == 0 ) lapic_init( info );

    // CP0 scan the internal (private) peripherals,
…
    /////////////////////////////////////////////////////////////////////////////////
-   // STEP 4 : Alls cores initialize their private IDLE thread.
+   // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
+   //          All cores initialize IDLE thread.
    //          Only CP0 in cluster 0 creates the VFS root inode.
    //          It access the boot device to initialize the file system context.
    /////////////////////////////////////////////////////////////////////////////////

-   // all cores create idle thread descriptor
+   if( CONFIG_KINIT_DEBUG ) chdev_dir_display();
+
+   // All cores enable the shared IPI channel
+
+   // @@@
+   hal_set_ebase( 0x1000 );
+   // @@@
+
+   dev_pic_enable_ipi();
+   hal_enable_irq( &status );
+
+   kinit_dmsg("\n[INFO] %s : IRQs enabled for core[%x,%d] / SR = %x\n",
+              __FUNCTION__ , local_cxy , core_lid , hal_get_sr() );
+
+   // all cores create the idle thread descriptor
    error = thread_kernel_init( thread,
                                THREAD_IDLE,
…
    if( error )
    {
-       nolock_printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
+       printk("\n[PANIC] in %s : core[%x][%d] cannot initialize idle thread\n",
               __FUNCTION__ , local_cxy , core_lid );
        hal_core_sleep();
…
        fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();

-       nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
+       assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
                "cannot create FATFS context in cluster 0\n" );

        // 2. access boot device to initialize FATFS context
…
                           &vfs_root_inode_xp );   // return

-       nolock_assert( (error == 0) , __FUNCTION__ ,
+       assert( (error == 0) , __FUNCTION__ ,
                "cannot create VFS root inode\n" );

        // 5. initialize VFS context for FAT in cluster 0
…
    else
    {
-       nolock_printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
+       printk("\n[PANIC] in %s : root FS must be FATFS\n", __FUNCTION__ );
        hal_core_sleep();
    }
…
        fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();

-       nolock_assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
+       assert( (fatfs_ctx != NULL) , __FUNCTION__ ,
                "cannot create FATFS context\n" );

        // get local pointer on VFS context for FATFS
…
    /////////////////////////////////////////////////////////////////////////////////

-   if( (core_lid == 0) && (local_cxy == 0) )
+   // if( (core_lid == 0) && (local_cxy == 0) )
    kinit_dmsg("\n[INFO] %s exit barrier 5 at cycle %d : VFS OK in all clusters\n",
               __FUNCTION__, (uint32_t)hal_time_stamp());
…
        devfs_ctx_t * devfs_ctx = devfs_ctx_alloc();

-       nolock_assert( (devfs_ctx != NULL) , __FUNCTION__ ,
+       assert( (devfs_ctx != NULL) , __FUNCTION__ ,
                "cannot create DEVFS context in cluster IO\n");

        // register DEVFS root and external directories
…
    }

+   printk("\n@@@ %s : cluster %x reach barrier 6\n", __FUNCTION__ , local_cxy );
+
    /////////////////////////////////////////////////////////////////////////////////
    if( core_lid == 0 ) remote_barrier( XPTR( io_cxy , &global_barrier ),
…
    /////////////////////////////////////////////////////////////////////////////////

-   if( (core_lid == 0) && (local_cxy == 0) )
+   // if( (core_lid == 0) && (local_cxy == 0) )
    kinit_dmsg("\n[INFO] %s exit barrier 6 at cycle %d : DEVFS OK in cluster IO\n",
               __FUNCTION__, (uint32_t)hal_time_stamp());
…
    print_banner( (info->x_size * info->y_size) , info->cores_nr );

-   kinit_dmsg("\n\n*** memory fooprint of main kernet objects ***\n"
+   kinit_dmsg("\n\n*** memory fooprint for main kernet objects ***\n\n"
               " - thread descriptor  : %d bytes\n"
               " - process descriptor : %d bytes\n"
…
    }

-   // each core activates its private PTI IRQ
+   // each core activates its private TICK IRQ
    dev_pic_enable_timer( CONFIG_SCHED_TICK_PERIOD );
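The new chdev_dir_display() dereferences each directory entry in the same way: an extended pointer is split into a cluster identifier and a local pointer, and the remote field is read with hal_remote_lwd(). As a minimal illustration (not part of the changeset), a single entry could be dumped with the sketch below; it only assumes the GET_CXY / GET_PTR / XPTR / hal_remote_lwd primitives visible in the diff, and the helper name and "name" argument are hypothetical.

    // minimal sketch : dump one external chdev directory entry
    static void chdev_entry_display( char * name , xptr_t entry_xp )
    {
        cxy_t     cxy  = GET_CXY( entry_xp );                         // cluster of the chdev
        chdev_t * ptr  = (chdev_t *)GET_PTR( entry_xp );              // local pointer in that cluster
        xptr_t    base = hal_remote_lwd( XPTR( cxy , &ptr->base ) );  // remote read of the base field

        printk(" - %s = %l / base = %l\n" , name , entry_xp , base );
    }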
trunk/kernel/kern/printk.c (r246 → r279)

    }

-   ////////////////////////////////////////
-   void nolock_printk( char * format , ...)
-   {
-       va_list args;
-
-       // call kernel_printf on TXT0, in busy waiting mode
-       va_start( args , format );
-       kernel_printf( 0 , 1 , format , &args );
-       va_end( args );
-   }
-
    ///////////////////////////////////////////
    inline void assert( bool_t condition,
…
    }

-   //////////////////////////////////////////////////
-   inline void nolock_assert( bool_t condition,
-                              const char * function_name,
-                              char * string )
-   {
-       if( condition == false )
-       {
-           nolock_printk("\n[PANIC] in %s : %s\n" , function_name , string );
-           hal_core_sleep();
-       }
-   }
-
-
    // Local Variables:
trunk/kernel/kern/printk.h (r188 → r279)

    /**********************************************************************************
-    * This function displays a formated string on the kernel terminal TXT0,
-    * using a busy waiting policy: It calls directly the relevant TXT driver,
-    * without taking the the lock protecting exclusive access to TXT0 terminal.
-    **********************************************************************************
-    * @ format     : formated string.
-    *********************************************************************************/
-   void nolock_printk( char* format, ... );
-
-   /**********************************************************************************
     * This function displays a "PANIC" message and force the calling core in
     * sleeping mode if a Boolean condition is false.
…
                        char * string );

-   /**********************************************************************************
-    * This function displays a "PANIC" message and force the calling core in
-    * sleeping mode if a Boolean condition is false,
-    * without taking the the lock protecting exclusive access to TXT0 terminal.
-    **********************************************************************************
-    * @ condition     : condition that must be true.
-    * @ function_name : name of the calling function.
-    * @ string        : error message if condition is false.
-    *********************************************************************************/
-   inline void nolock_assert( bool_t condition,
-                              const char * function_name,
-                              char * string );
-
    ///////////////////////////////////////////////////////////////////////////////////
    // Conditionnal debug macros
…
    #if CONFIG_KINIT_DEBUG
-   #define kinit_dmsg(...) nolock_printk(__VA_ARGS__)
+   #define kinit_dmsg(...) printk(__VA_ARGS__)
    #else
    #define kinit_dmsg(...)
trunk/kernel/kern/process.c (r204 → r279)

    pid_t       parent_pid;

-   process_dmsg("\n[INFO] %s : enters for process %x in cluster %x / parent_xp = %l\n",
-                __FUNCTION__ , pid , parent_xp );
+   process_dmsg("\n[INFO] %s : enters for process %x in cluster %x\n",
+                __FUNCTION__ , pid , local_cxy );

    // get parent process cluster, local pointer, and pid
…
    local_process->ref_xp = reference_process_xp;

+   process_dmsg("\n[INFO] %s : enter for process %x in cluster %x\n",
+                __FUNCTION__ , local_process->pid );
+
    // reset children list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
…
    hal_fence();

+   process_dmsg("\n[INFO] %s : exit for process %x in cluster %x\n",
+                __FUNCTION__ , local_process->pid );

    return 0;
trunk/kernel/kern/rpc.c (r265 → r279)

    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *error = (error_t)rpc.args[0];
    *ppn   = (uint32_t)rpc.args[1];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *pid   = (pid_t)rpc.args[1];
    *error = (error_t)rpc.args[2];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output arguments from RPC descriptor
    *error = (error_t)rpc.args[1];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ ,
            "caller must be reference process cluster\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // get local process index in reference cluster
…
        if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc );
    }
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *thread_xp = (xptr_t)rpc.args[4];
    *error     = (error_t)rpc.args[5];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *thread_xp = (xptr_t)rpc.args[3];
    *error     = (error_t)rpc.args[4];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // register RPC request in remote RPC fifo
    rpc_send_sync( cxy , &rpc );
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *inode_xp = (xptr_t)rpc.args[8];
    *error    = (error_t)rpc.args[9];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // register RPC request in remote RPC fifo (blocking function)
    rpc_send_sync( cxy , &rpc );
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *dentry_xp = (xptr_t)rpc.args[3];
    *error     = (error_t)rpc.args[4];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // register RPC request in remote RPC fifo (blocking function)
    rpc_send_sync( cxy , &rpc );
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *file_xp = (xptr_t)rpc.args[2];
    *error   = (error_t)rpc.args[3];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // register RPC request in remote RPC fifo (blocking function)
    rpc_send_sync( cxy , &rpc );
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output values from RPC descriptor
    *error = (error_t)rpc.args[3];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output values from RPC descriptor
    *error = (error_t)rpc.args[1];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *cluster = (uint32_t)rpc.args[3];
    *error   = (error_t)rpc.args[4];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output argument from rpc descriptor
    *vseg_xp = rpc.args[2];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    *ppn   = (ppn_t)rpc.args[3];
    *error = (error_t)rpc.args[4];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output arguments from RPC descriptor
    *buf_xp = (xptr_t)rpc.args[1];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // register RPC request in remote RPC fifo
    rpc_send_sync( cxy , &rpc );
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

+   rpc_dmsg("\n[INFO] %s : enter\n", __FUNCTION__ );
+
    // initialise RPC descriptor header
    rpc_desc_t  rpc;
…
    // get output values from RPC descriptor
    *error = (error_t)rpc.args[6];
+
+   rpc_dmsg("\n[INFO] %s : completed\n", __FUNCTION__ );
    }
…
                       rpc_desc_t * rpc )
    {
-   thread_t * this = CURRENT_THREAD;
    uint32_t   cores;
    error_t    error;
…
    reg_t      sr_save;

-   // get client CPU and cluster coordinates
-   cxy_t      client_cxy = local_cxy;
-   lid_t      client_lid = CURRENT_CORE->lid;
+   thread_t * this = CURRENT_THREAD;
+
+   rpc_dmsg("\n[INFO] %s : enter / client_cxy = %x / server_cxy = %x\n",
+            __FUNCTION__ , local_cxy , server_cxy );

    // allocate and initialise an extended pointer on the RPC descriptor
-   xptr_t  xp = XPTR( client_cxy , rpc );
-
-   // get local pointer on rpc_fifo in remote cluster with the
-   // assumption that addresses are identical in all clusters
+   xptr_t  desc_xp = XPTR( local_cxy , rpc );
+
+   // get local pointer on rpc_fifo in remote cluster, with the
+   // assumption that rpc_fifo pddresses are identical in all clusters
    rpc_fifo_t * rf = &LOCAL_CLUSTER->rpc_fifo;
…
    {
        error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ),
-                                     (uint64_t *)&xp,
+                                     (uint64_t )desc_xp,
                                      &first );

        if ( error )
        {
-           printk("\n[WARNING] %s : core %d in cluster %x cannot post RPC to cluster %x\n",
-                  __FUNCTION__ , client_lid , client_cxy , server_cxy );
+           printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
+                  __FUNCTION__ , local_cxy , server_cxy );
+
            if( thread_can_yield() ) sched_yield();
+       }
+       else
+       {
        }
    }
    while( error );

-   rpc_dmsg("\n[INFO] %s on core %d in cluster %x sent RPC %p to cluster %x\n",
-            __FUNCTION__ , client_lid , client_cxy , rpc , server_cxy );
+   rpc_dmsg("\n[INFO] %s : RPC registered / client_cxy = %x / server_cxy = %x\n",
+            __FUNCTION__ , local_cxy , server_cxy , first );

-   // send IPI if this is the first RPC in remote FIFO
-   // and no CPU is in kernel mode in server cluster.
-   // the selected CPU in server has the same lid as the client CPU.
+   // send IPI to remote CP0, if this is the first RPC in remote FIFO,
+   // and there is no CPU is in kernel mode in server cluster.
    if( first )
    {
…
        if( cores == 0 ) // no core in kernel mode in server
        {
-           dev_pic_send_ipi( server_cxy , client_lid );
-
-           rpc_dmsg("\n[INFO] %s : core %d in cluster %x send IPI to core %d in cluster %x\n",
-                    __FUNCTION__, client_lid , client_cxy , client_lid , server_cxy );
+           dev_pic_send_ipi( server_cxy , 0 );
+
+           rpc_dmsg("\n[INFO] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",
+                    __FUNCTION__, local_cxy , server_cxy );
        }
    }

-   // activate preemption to allow incoming RPC and avoid deadlock
+   // enable IRQs to allow incoming RPC and avoid deadlock
    if( this->type == THREAD_RPC ) hal_enable_irq( &sr_save );

-   // the sending thread poll the response slot until RPC completed
+   // the server thread poll the response slot until RPC completed
+   // TODO this could be replaced by a descheduling policy... [AG]
    while( 1 )
    {
…
    }

-   // restore preemption
+   // restore IRQs
    if( this->type == THREAD_RPC ) hal_restore_irq( sr_save );
+
+   rpc_dmsg("\n[INFO] %s : completed / client_cxy = %x / server_cxy = %x\n",
+            __FUNCTION__ , local_cxy , server_cxy );

    } // end rpc_send_sync()
…
    }

-   ///////////////////////////////////////////////
-   error_t rpc_execute_all( rpc_fifo_t * rpc_fifo )
+   /////////////////////////////////////////////
+   void rpc_execute_all( rpc_fifo_t * rpc_fifo )
    {
        xptr_t         xp;           // extended pointer on RPC descriptor
…
        rpc_desc_t   * desc;         // pointer on RPC descriptor
        uint32_t       index;        // RPC index
-       uint32_t       expected;     // number of expected responses
        cxy_t          client_cxy;   // client cluster identifier
        error_t        error;
…
        if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
        {
-           rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n"
+           rpc_dmsg("\n[INFO] %s : RPC_THREAD %x on core %x in cluster %x handles RPC %d\n",
                     __FUNCTION__ , this->trdid , core->lid , local_cxy , count );
…
            desc = (rpc_desc_t *)GET_PTR( xp );

-           // get rpc index and expected responses from RPC descriptor
+           // get rpc index from RPC descriptor
            index = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
-           expected = hal_remote_lw( XPTR( client_cxy , &desc->response ) );

            // call the relevant server function
…
            // notify RPC completion as required
-           if( expected == 1 ) hal_remote_sw( XPTR(client_cxy,&desc->response) , 0 );
-           if( expected > 1 ) hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
+           hal_remote_atomic_add( XPTR(client_cxy,&desc->response) , -1 );
        }
…
            (count > CONFIG_RPC_PENDING_MAX) ) break;
    }
-   while( 1 )
-
-   rpc_dmsg("\n[INFO] %s running on core %d in cluster %x exit\n"
-            __FUNCTION__ , CURRENT_CORE->lid , local_cxy );
-
+   while( 1 );
+
    // update RPC_FIFO global counter
    rpc_fifo->count += count;

-   return 0;
    } // end rpc_execute_all()
…
    reg_t      sr_save;

+
    this  = CURRENT_THREAD;
    core  = this->core;
…
    found = false;

-   // calling thread must be the RPC_FIFO owner
-   if( this->trdid != rpc_fifo->owner )
-   {
-       printk("\n[PANIC] in %s : calling thread is not RPC_FIFO owner\n", __FUNCTION__ );
-       hal_core_sleep();
-   }
+   assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,
+           "calling thread is not RPC_FIFO owner\n" );

    // makes the calling thread not preemptable
…
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
-       if( (thread->type == THREAD_RPC) && (thread->blocked ==
+       if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
        {
            found = true;
…
    {
        thread->blocked = 0;
+
+       rpc_dmsg("\n[INFO] %s : activate RPC thread %x on core %x in cluster %x at cycle %d\n",
+                __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );
    }
    else   // create a new RPC thread
…
    }

-   rpc_dmsg("\n[INFO] %s creates RPC thread %x on core %x in cluster %x at cycle %d\n",
+   rpc_dmsg("\n[INFO] %s : create RPC thread %x on core %x in cluster %x at cycle %d\n",
             __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );

    // update core descriptor counter
-   hal_atomic_add( &core->rpc_threads , 1 );
+   hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    }

    // update owner in rpc_fifo
    rpc_fifo->owner = thread->trdid;
-
-   rpc_dmsg ("\n[INFO] %s activates RPC thread %x on core %x in cluster %x at cycle %d\n",
-             __FUNCTION__ , thread , core->gid , local_cxy , hal_get_cycles() );

    // current thread deschedules / RPC thread start execution
…
    }

-   // calling thread tries to take the light lock,
-   // and activates an RPC thread if success
+   // try to take the light lock, and activates an RPC thread if success
    if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    {
…
    // this infinite loop is not preemptable
-   // the RPC thread deschedule when the RPC_FIFO is empty
+   // the RPC thread deschedule only when the RPC_FIFO is empty
    while(1)
    {
…

-       // suicide if too much RPC threads for this core
-       if( this->core->rpc_threads > CONFIG_RPC_THREADS_MAX )
+       // block and deschedule or sucide
+       if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
        {
            rpc_dmsg("\n[INFO] RPC thread %x suicide on core %d in cluster %x at cycle %d\n",
…

            // update core descriptor counter
-           hal_atomic_add( &this->core->rpc_threads , -1 );
+           hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );

            // suicide
            thread_exit();
        }
-
-       // block and deschedule
-       rpc_dmsg("\n[INFO] RPC thread %x deschedule on core %d in cluster %x at cycle %d\n",
-                this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
-
-       thread_block( this , THREAD_BLOCKED_IDLE );
-       sched_yield();
-
-       rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
-                this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
-   }
+       else
+       {
+           rpc_dmsg("\n[INFO] RPC thread %x blocks on core %d in cluster %x at cycle %d\n",
+                    this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
+
+           thread_block( this , THREAD_BLOCKED_IDLE );
+           sched_yield();
+
+           rpc_dmsg("\n[INFO] RPC thread %x wake up on core %d in cluster %x at cycle %d\n",
+                    this->trdid , this->core->lid , local_cxy , hal_get_cycles() );
+       }
+   } // end while
    } // end rpc_thread_func()
trunk/kernel/kern/rpc.h (r265 → r279)

     * This function is the entry point for RPC handling on the server side.
     * It can be executed by any thread running (in kernel mode) on any core.
-    * It first checks the core private RPC fifo, an then the cluster shared RPC fifo.
-    * It calls the rpc_activate_thread() function to activate a dedicated RPC thread.
-    ***********************************************************************************
-    * @ returns true if at least one RPC found / false otherwise.
+    * It checks the RPC fifo, try to take the light-lock and activates (or creates)
+    * an RPC thread in case of success.
+    ***********************************************************************************
+    * @ returns true if success / false otherwise.
     **********************************************************************************/
    bool_t rpc_check();
…
     ***********************************************************************************
     * @ rpc_fifo : pointer on the local RPC fifo
-    * @ returns 0 if success
-    **********************************************************************************/
-   error_t rpc_execute_all( rpc_fifo_t * rpc_fifo );
+    **********************************************************************************/
+   void rpc_execute_all( rpc_fifo_t * rpc_fifo );

    /**********************************************************************************
trunk/kernel/kern/scheduler.c (r278 → r279)

    sched->k_threads_nr = 0;

-   sched->current = NULL;
-   sched->idle    = NULL;
-   sched->u_last  = NULL;
-   sched->k_last  = NULL;
+   sched->current = CURRENT_THREAD;
+   sched->idle    = NULL;            // initialized in kernel_init()
+   sched->u_last  = NULL;            // initialized in sched_register_thread()
+   sched->k_last  = NULL;            // initialized in sched_register_thread()

    // initialise threads lists
…
    spinlock_lock( &sched->lock );

-   // register thread
    if( type == THREAD_USER )
    {
+       // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
+
+       // initialize u_last field if first user thread
+       if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
+       // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
+
+       // initialize k_last field if first kernel thread
+       if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }
…
    spinlock_lock( &sched->lock );

-   // remove thread
    if( type == THREAD_USER )
    {
+       // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;
+
+       // reset the u_last field if list empty
+       if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
+       // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;
+
+       // reset the k_last field if list empty
+       if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }
…
    list_entry_t * last;

-   // first scan the kernel threads
-   last    = sched->k_last;
-   current = sched->k_last;
-   do
-   {
-       // get next entry in kernel list
-       current = list_next( &sched->k_root , current );
-
-       // skip the list root that does not contain a thread
-       if( current == NULL ) continue;
-
-       // get thread pointer
-       thread = LIST_ELEMENT( current , thread_t , sched_list );
-
-       // return thread if not blocked
-       if( thread->blocked == 0 )
-       {
-           // release lock
-           spinlock_unlock( &sched->lock );
-           return thread;
-       }
-   }
-   while( current != last );
-
-   // second scan the user threads
-   last    = sched->u_last;
-   current = sched->u_last;
-   do
-   {
-       // get next entry in user list
-       current = list_next( &sched->u_root , current );
-
-       // skip the list root that does not contain a thread
-       if( current == NULL ) continue;
-
-       // get thread pointer
-       thread = LIST_ELEMENT( current , thread_t , sched_list );
-
-       // return thread if not blocked
-       if( thread->blocked == 0 )
-       {
-           // release lock
-           spinlock_unlock( &sched->lock );
-           return thread;
-       }
-   }
-   while( current != last );
+   // first : scan the kernel threads list,
+   // only if this list is not empty
+   if( list_is_empty( &sched->k_root ) == false )
+   {
+       last    = sched->k_last;
+       current = sched->k_last;
+       do
+       {
+           // get next entry in kernel list
+           current = list_next( &sched->k_root , current );
+
+           // skip the root that does not contain a thread
+           if( current == NULL ) current = sched->k_root.next;
+
+           // get thread pointer for this entry
+           thread = LIST_ELEMENT( current , thread_t , sched_list );
+
+           // return thread if runnable
+           if( thread->blocked == 0 )
+           {
+               // release lock
+               spinlock_unlock( &sched->lock );
+               return thread;
+           }
+       }
+       while( current != last );
+   }
+
+   // second : scan the user threads list,
+   // only if this list is not empty
+   if( list_is_empty( &sched->u_root ) == false )
+   {
+       last    = sched->u_last;
+       current = sched->u_last;
+       do
+       {
+           // get next entry in user list
+           current = list_next( &sched->u_root , current );
+
+           // skip the root that does not contain a thread
+           if( current == NULL ) current = sched->u_root.next;
+
+           // get thread pointer for this entry
+           thread = LIST_ELEMENT( current , thread_t , sched_list );
+
+           // return thread if runnable
+           if( thread->blocked == 0 )
+           {
+               // release lock
+               spinlock_unlock( &sched->lock );
+               return thread;
+           }
+       }
+       while( current != last );
+   }

    // release lock
    spinlock_unlock( &sched->lock );

-   // third , return idle thread if no runnable thread
+   // third : return idle thread if no runnable thread
    return sched->idle;
…
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
+   scheduler_t * sched   = &core->scheduler;

    if( thread_can_yield() == false )
…
             __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );

-   // switch contexts if new thread
+   // switch contexts and update scheduler state if new thread
    if( next != current )
    {
        hal_cpu_context_save( current );
        hal_cpu_context_restore( next );
+
+       if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
+       else                               sched->k_last = &current->sched_list;
+
+       sched->current = next;
    }
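The new sched_select() treats each thread list as a ring: it resumes scanning just after the last served entry, skips the list root (which holds no thread), and stops after one full turn. The sketch below isolates that round-robin traversal; it is not part of the changeset and only assumes the list_entry_t / list_next() / LIST_ELEMENT() / list_is_empty() primitives used above, with last known to be valid whenever the list is not empty (as guaranteed by sched_register_thread()).

    // minimal sketch : round-robin scan of one scheduler list, starting after "last"
    static thread_t * scan_round_robin( list_entry_t * root , list_entry_t * last )
    {
        if( list_is_empty( root ) ) return NULL;       // nothing to scan

        list_entry_t * current = last;
        do
        {
            current = list_next( root , current );     // next entry in the ring
            if( current == NULL ) current = root->next; // wrap around, skipping the root

            thread_t * thread = LIST_ELEMENT( current , thread_t , sched_list );
            if( thread->blocked == 0 ) return thread;  // first runnable thread wins
        }
        while( current != last );

        return NULL;                                   // no runnable thread in this list
    }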
trunk/kernel/kern/scheduler.h (r14 r279 — comment rulers shortened throughout; only wording changes shown)

    struct thread_s;

    /*********************************************************************************************
     * This structure define the scheduler associated to a given core.
     * WARNING : the idle thread is executed when there is no runable thread in the list
     * of attached threads, but is NOT part of the list of attached threads.
     ********************************************************************************************/

    typedef struct scheduler_s
    {
        spinlock_t        lock;          /*! readlock protecting lists of threads              */
        uint16_t          u_threads_nr;  /*! total numbre of attached user threads             */
        uint16_t          k_threads_nr;  /*! total number of attached kernel threads           */
        list_entry_t      u_root;        /*! root of list of user threads for this scheduler   */
        list_entry_t      k_root;        /*! root of list of kernel threads for this scheduler */
-       list_entry_t    * u_last;        /*! pointer on list_entry for last executed kernel thread */
-       list_entry_t    * k_last;        /*! pointer on list entry for last executed user thread   */
+       list_entry_t    * u_last;        /*! pointer on list_entry for last executed k_thread  */
+       list_entry_t    * k_last;        /*! pointer on list entry for last executed u_thread  */
        struct thread_s * idle;          /*! pointer on idle thread                            */
        struct thread_s * current;       /*! pointer on current running thread                 */
    }
    scheduler_t;

    /*********************************************************************************************
     * This function initialises the scheduler for a given core.
     ********************************************************************************************/
    void sched_init( struct core_s * core );

    /*********************************************************************************************
     * This function register a new thread in a given core scheduler.
     *********************************************************************************************
     * @ core    : local pointer on the core descriptor.
     * @ thread  : local pointer on the thread descriptor.
     ********************************************************************************************/
    void sched_register_thread( struct core_s   * core,
                                struct thread_s * thread );

    /*********************************************************************************************
     * This function removes a thread from the set of threads attached to a given core.
     *********************************************************************************************
     * @ thread  : local pointer on the thread descriptor.
     ********************************************************************************************/
    void sched_remove_thread( struct thread_s * thread );

    /*********************************************************************************************
     * This function handles pending signals for all registered threads, and tries to make
     * a context switch for the core running the calling thread.
…
     * - If there is no other runable thread, the calling thread continues execution.
     * - If there is no runable thread, the idle thread is executed.
     ********************************************************************************************/
    void sched_yield();

    /*********************************************************************************************
     * This function handles pending signals for all registered threads, and make
     * a context switch to the thread defined by the <thread> argument.
     * If the selected thread is not attached to the same core as the calling thread,
     * or is blocked, it causes a kernel panic.
     *********************************************************************************************
     * @ new  : local pointer on the thread to run.
     ********************************************************************************************/
    void sched_switch_to( struct thread_s * new );

    /*********************************************************************************************
     * This function scan all threads attached to a given core scheduler, and executes
     * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal.
     *********************************************************************************************
     * @ core  : local pointer on the core descriptor.
     ********************************************************************************************/
    void sched_handle_signals( struct core_s * core );

    /*********************************************************************************************
     * This function is used by the scheduler of a given core to actually kill a thread that has
     * the SIG_KILL signal set (following a thread_exit() or a thread_kill() event).
…
     * - It removes the thread from the scheduler.
     * - It release physical memory allocated for thread descriptor.
     *********************************************************************************************
     * @ thread  : local pointer on the thread descriptor.
     ********************************************************************************************/
    void sched_kill_thread( struct thread_s * thread );

    /*********************************************************************************************
     * This function does NOT modify the scheduler state.
     * It just select a thread in the list of attached threads, implementing the following policy:
…
     * the last executed one, and returns the first runable found (can be the current thread).
     * 3) if no runable thread found, it returns the idle thread.
     *********************************************************************************************
     * @ core    : local pointer on the core descriptor.
     * @ returns pointer on selected thread descriptor
     ********************************************************************************************/
    struct thread_s * sched_select( struct core_s * core );

    /*********************************************************************************************
     * This function scan the list of kernel threads to find an idle (blocked) RPC thread.
     *********************************************************************************************
     * @ core    : local pointer on the core descriptor.
     * @ returns pointer on RPC thread descriptor / returns NULL if no idle RPC thread.
     ********************************************************************************************/
    struct thread_s * sched_get_rpc_thead( struct core_s * core );
trunk/kernel/kern/thread.h (r174 r279)

    uint32_t        dev_channel;     /*! device channel for a DEV thread            */
-   union                            /*! embedded command for a DEV thread          */
-   {
-       ioc_command_t  ioc;          /*! IOC device generic command                 */
-       txt_command_t  txt;          /*! TXT device generic command                 */
-       nic_command_t  nic;          /*! NIC device generic command                 */
-       mmc_command_t  mmc;          /*! MMC device generic command                 */
-       dma_command_t  dma;          /*! DMA device generic command                 */
-   }
-   command;
+
+   ioc_command_t   ioc_cmd;         /*! IOC device generic command                 */
+   txt_command_t   txt_cmd;         /*! TXT device generic command                 */
+   nic_command_t   nic_cmd;         /*! NIC device generic command                 */
+   mmc_command_t   mmc_cmd;         /*! MMC device generic command                 */
+   dma_command_t   dma_cmd;         /*! DMA device generic command                 */

    cxy_t           rpc_client_cxy;  /*! client cluster index (for a RPC thread)    */