Changeset 408 for trunk/kernel/kern
- Timestamp: Dec 5, 2017, 4:20:07 PM
- Location: trunk/kernel/kern
- Files: 17 edited
trunk/kernel/kern/chdev.c
r407 → r408

@@ -120 +120 @@
     uint32_t   lid;          // core running the server thread local index
     xptr_t     lock_xp;      // extended pointer on lock protecting the chdev queue
-    uint32_t   modified;     // non zero if the server thread state was modified
+    uint32_t   different;    // non zero if server thread core != client thread core
     uint32_t   save_sr;      // for critical section

@@ -152 +152 @@
     lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );

-    // enter critical section
+    // compute server core != thread core
+    different = (lid != this->core->lid) || (local_cxy != chdev_cxy);
+
+    // enter critical section to make atomic :
+    // (1) client blocking
+    // (2) client registration in server queue
+    // (3) IPI to force server scheduling
+    // (4) descheduling
+    // ... in this order
     hal_disable_irq( &save_sr );
+
+    // block current thread
+    thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );

     // register client thread in waiting queue
     [...]
     remote_spinlock_unlock( lock_xp );

-    // unblock server thread
-    modified = thread_unblock( XPTR( chdev_cxy , server_ptr ), THREAD_BLOCKED_DEV_QUEUE );
-
-    // send IPI to core running the server thread
-    if( modified ) dev_pic_send_ipi( chdev_cxy , lid );
+    // send IPI to core running the server thread if required
+    if( different ) dev_pic_send_ipi( chdev_cxy , lid );

-    // block client thread
-    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
-
     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n",
     __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );

-    thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );
-    sched_yield();
+    // deschedule
+    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
+    sched_yield("blocked on I/O");

     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle %d\n",
     [...]

@@ -217 +223 @@
     __FUNCTION__ , server , hal_time_stamp() );

-    // block and deschedule
-    thread_block( server , THREAD_BLOCKED_DEV_QUEUE );
-    sched_yield();
+    // deschedule
+    sched_yield("I/O queue empty");

     chdev_dmsg("\n[DBG] %s : thread %x resume /cycle %d\n",
trunk/kernel/kern/cluster.c
r407 → r408

@@ -45 +45 @@
     #include <dqdt.h>

-    ///////////////////////////////////////////////////////////////////////////////////////////
+    /////////////////////////////////////////////////////////////////////////////////////
     // Extern global variables
-    ///////////////////////////////////////////////////////////////////////////////////////////
+    /////////////////////////////////////////////////////////////////////////////////////

     extern process_t process_zero;     // allocated in kernel_init.c file

-
-
-    //////////////////////////////////
-    void cluster_sysfs_register(void)
-    {
-    // TODO
-    }

     /////////////////////////////////////////////////
trunk/kernel/kern/cluster.h
r407 → r408

@@ -196 +196 @@
     * This function allocates a new PID in local cluster, that becomes the process owner.
     * It registers the process descriptor extended pointer in the local processs manager
-    * pref_tbl[] array. This function is called by the rpc_process_alloc_pid() function for
-    * remote registration, or by the process_init_create() function for local registration.
+    * pref_tbl[] array. This function is called by the process_make_fork() function,
+    * or by the process_init_create() function.
     ******************************************************************************************
     * @ process : [in] extended pointer on the process descriptor.
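Note: the comment above refers to the PID allocated in the process owner cluster, which the rest of this changeset decomposes with CXY_FROM_PID() and LPID_FROM_PID() (see process.c below). The stand-alone C sketch below only illustrates that owner-cluster / local-index split; the 16/16 bit layout and the names are assumptions for illustration, the real macros are defined in kernel headers that are not part of this diff.

    /* Sketch of a PID built from (owner cluster, local index).
     * Assumed 16/16 split; not the actual ALMOS-MKH encoding. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pid_demo_t;                       /* hypothetical 32-bit PID */

    static inline pid_demo_t pid_make(uint32_t cxy, uint32_t lpid)
    {
        return (cxy << 16) | (lpid & 0xFFFF);          /* owner cluster in high bits (assumed) */
    }
    static inline uint32_t cxy_from_pid (pid_demo_t pid) { return pid >> 16;    }
    static inline uint32_t lpid_from_pid(pid_demo_t pid) { return pid & 0xFFFF; }

    int main(void)
    {
        pid_demo_t pid = pid_make(0x12, 5);            /* process 5 owned by cluster 0x12 */
        printf("pid = %x / owner cluster = %x / lpid = %u\n",
               (unsigned)pid, (unsigned)cxy_from_pid(pid), (unsigned)lpid_from_pid(pid));
        return 0;
    }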
trunk/kernel/kern/core.c
r407 → r408

@@ -111 +111 @@

     // handle scheduler
-    if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( );
+    if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK" );

     // update DQDT
trunk/kernel/kern/do_syscall.c
r407 → r408

@@ -29 +29 @@
     #include <printk.h>
     #include <syscalls.h>
+    #include <shared_syscalls.h>

-    /////////////////////////////////////////////////////////////////////////////////////////////
+    ///////////////////////////////////////////////////////////////////////////////////////
     // This ƒonction should never be called...
-    /////////////////////////////////////////////////////////////////////////////////////////////
+    ///////////////////////////////////////////////////////////////////////////////////////
     static int sys_undefined()
     {
     [...]
     }

-    /////////////////////////////////////////////////////////////////////////////////////////////
+    ///////////////////////////////////////////////////////////////////////////////////////
     // This array of pointers define the kernel functions implementing the syscalls.
     // It must be kept consistent with the enum in "shared_syscalls.h" file.
-    /////////////////////////////////////////////////////////////////////////////////////////////
+    ///////////////////////////////////////////////////////////////////////////////////////

     typedef int (*sys_func_t) ();
     [...]
     sys_mutex,                  // 9

-    sys_undefined,              // 10
+    sys_exit,                   // 10
     sys_munmap,                 // 11
     sys_open,                   // 12
     [...]
     };

+    ////////////////////////////////////
+    char * syscall_str( uint32_t index )
+    {
+        if     ( index == SYS_THREAD_EXIT   ) return "THREAD_EXIT";     // 0
+        else if( index == SYS_THREAD_YIELD  ) return "THREAD_YIELD";    // 1
+        else if( index == SYS_THREAD_CREATE ) return "THREAD_CREATE";   // 2
+        else if( index == SYS_THREAD_JOIN   ) return "THREAD_JOIN";     // 3
+        else if( index == SYS_THREAD_DETACH ) return "THREAD_DETACH";   // 4
+        else if( index == SYS_SEM           ) return "SEM";             // 6
+        else if( index == SYS_CONDVAR       ) return "CONDVAR";         // 7
+        else if( index == SYS_BARRIER       ) return "BARRIER";         // 8
+        else if( index == SYS_MUTEX         ) return "MUTEX";           // 9
+
+        else if( index == SYS_EXIT          ) return "EXIT";            // 10
+        else if( index == SYS_MUNMAP        ) return "MUNMAP";          // 11
+        else if( index == SYS_OPEN          ) return "OPEN";            // 12
+        else if( index == SYS_MMAP          ) return "MMAP";            // 13
+        else if( index == SYS_READ          ) return "READ";            // 14
+        else if( index == SYS_WRITE         ) return "WRITE";           // 15
+        else if( index == SYS_LSEEK         ) return "LSEEK";           // 16
+        else if( index == SYS_CLOSE         ) return "CLOSE";           // 17
+        else if( index == SYS_UNLINK        ) return "UNLINK";          // 18
+        else if( index == SYS_PIPE          ) return "PIPE";            // 19
+
+        else if( index == SYS_CHDIR         ) return "CHDIR";           // 20
+        else if( index == SYS_MKDIR         ) return "MKDIR";           // 21
+        else if( index == SYS_MKFIFO        ) return "MKFIFO";          // 22
+        else if( index == SYS_OPENDIR       ) return "OPENDIR";         // 23
+        else if( index == SYS_READDIR       ) return "READDIR";         // 24
+        else if( index == SYS_CLOSEDIR      ) return "CLOSEDIR";        // 25
+        else if( index == SYS_GETCWD        ) return "GETCWD";          // 26
+        else if( index == SYS_ALARM         ) return "ALARM";           // 28
+        else if( index == SYS_RMDIR         ) return "RMDIR";           // 29
+
+        else if( index == SYS_UTLS          ) return "UTLS";            // 30
+        else if( index == SYS_CHMOD         ) return "CHMOD";           // 31
+        else if( index == SYS_SIGNAL        ) return "SIGNAL";          // 32
+        else if( index == SYS_TIMEOFDAY     ) return "TIMEOFDAY";       // 33
+        else if( index == SYS_KILL          ) return "KILL";            // 34
+        else if( index == SYS_GETPID        ) return "GETPID";          // 35
+        else if( index == SYS_FORK          ) return "FORK";            // 36
+        else if( index == SYS_EXEC          ) return "EXEC";            // 37
+        else if( index == SYS_STAT          ) return "STAT";            // 38
+        else if( index == SYS_TRACE         ) return "TRACE";           // 39
+
+        else if( index == SYS_GET_CONFIG    ) return "GET_CONFIG";      // 40
+        else if( index == SYS_GET_CORE      ) return "GET_CORE";        // 41
+        else if( index == SYS_GET_CYCLE     ) return "GET_CYCLE";       // 42
+        else if( index == SYS_GET_SCHED     ) return "GET_SCHED";       // 43
+        else if( index == SYS_PANIC         ) return "PANIC";           // 44
+        else if( index == SYS_SLEEP         ) return "SLEEP";           // 45
+        else if( index == SYS_WAKEUP        ) return "WAKEUP";          // 46
+
+        else return "undefined";
+    }
+
+
     //////////////////////////////////
     reg_t do_syscall( thread_t * this,
     [...]
                       reg_t      service_num )
     {
-    int
+    int        error = 0;

     // update user time
     thread_user_time_update( this );

-    // enable interrupts
-    hal_enable_irq( NULL );
-
     // check syscall index
     if( service_num >= SYSCALLS_NR )
     [...]
     }

-    #if( CONFIG_SYSCALL_DEBUG & 0x1)
-    printk("\n[DBG] %s : pid = %x / trdid = %x / service #%d\n"
-    "     arg0 = %x / arg1 = %x / arg2 = %x / arg3 = %x\n",
-    __FUNCTION__ , this->process->pid , this->trdid , service_num , arg0 , arg1 , arg2 , arg3 );
-    #endif
-
     // reset errno
     this->errno = 0;
     [...]
     error = syscall_tbl[service_num] ( arg0 , arg1 , arg2 , arg3 );

-    // disable interrupt
-    hal_disable_irq( NULL );
-
     // update kernel time
     thread_kernel_time_update( this );
trunk/kernel/kern/do_syscall.h
r407 → r408

@@ -30 +30 @@
     #include <thread.h>

-    /********************************************************************************************
+    /**************************************************************************************
     * This function calls the kernel function defined by the <service_num> argument.
-    * The possible values for servic_num are defined in the syscalls/syscalls.h file.
-    ********************************************************************************************
+    * The possible values for service_num are defined in the syscalls/syscalls.h file.
+    * It does NOT enable interrupts, that must be enabled by the kernel function
+    * depending on the implemented service.
+    **************************************************************************************
     * @ this        : pointer on calling thread descriptor
     * @ arg0        : kernel function argument 0
     [...]
     * @ service_num : kernel service index
     * @ return 0 if success / return non zero if failure.
-    ********************************************************************************************/
+    *************************************************************************************/
     reg_t do_syscall( thread_t * this,
                       reg_t      arg0,
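Note: do_syscall() dispatches through the syscall_tbl[] array of function pointers shown in do_syscall.c above, after bounds-checking service_num. The stand-alone C sketch below shows that table-driven dispatch pattern; the table contents, names and argument count are invented for the example and do not match the real syscall_tbl[].

    /* Sketch of table-driven syscall dispatch: bounds-check the index,
     * then call through an array of function pointers. */
    #include <stdio.h>
    #include <stdint.h>

    typedef int (*sys_func_t)(uint32_t a0, uint32_t a1);

    static int sys_undefined(uint32_t a0, uint32_t a1) { (void)a0; (void)a1; return -1; }
    static int sys_hello    (uint32_t a0, uint32_t a1) { printf("hello %u %u\n", a0, a1); return 0; }

    static const sys_func_t demo_tbl[] = { sys_undefined, sys_hello };
    #define DEMO_NR (sizeof(demo_tbl) / sizeof(demo_tbl[0]))

    static int demo_do_syscall(uint32_t num, uint32_t a0, uint32_t a1)
    {
        if (num >= DEMO_NR) return -1;     /* reject out-of-range service numbers     */
        return demo_tbl[num](a0, a1);      /* indexed call through the function table */
    }

    int main(void)
    {
        return demo_do_syscall(1, 4, 2);   /* dispatches to sys_hello */
    }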
trunk/kernel/kern/kernel_init.c
r407 → r408

@@ -122 +122 @@
     vfs_ctx_t      fs_context[FS_TYPES_NR]      CONFIG_CACHE_LINE_ALIGNED;

-    // These variables are used by the sched_yield function to save SR value
-    __attribute__((section(".kdata")))
-    uint32_t       switch_save_sr[CONFIG_MAX_LOCAL_CORES]      CONFIG_CACHE_LINE_ALIGNED;
-
-    #if CONFIG_READ_DEBUG
+
+    // TODO remove these debug variables used dans sys_read()
+
+    #if CONFIG_READ_DEBUG
     uint32_t       enter_sys_read;
     uint32_t       exit_sys_read;

@@ -342 +341 @@
     ///////////////////////////////////////////////////////////////////////////////////////////
     // This function allocates memory and initializes the chdev descriptors for the
-    // external (shared) peripherals other than the IOPIC, as specified by the boot_info ,
-    // including the dynamic linking with the driver for the specified implementation.
+    // external (shared) peripherals other than the IOPIC, as specified by the boot_info.
+    // This includes the dynamic linking with the driver for the specified implementation.
     // These chdev descriptors are distributed on all clusters, using a modulo on a global
-    // index, identically computed in all clusters: In each cluster, the local CP0 core
-    // computes the global index for all external chdevs, and creates only the chdevs that
-    // must be placed in the local cluster.
+    // index, identically computed in all clusters.
+    // This function is executed in all clusters by the CP0 core, that computes a global index
+    // for all external chdevs. Each CP0 core creates only the chdevs that must be placed in
+    // the local cluster, because the global index matches the local index.
     // The relevant entries in all copies of the devices directory are initialised.

@@ -830 +830 @@

     // all CP0s initialize the process_zero descriptor
-    if( core_lid == 0 ) process_reference_init( &process_zero , 0 , XPTR_NULL );
+    if( core_lid == 0 ) process_zero_init( &process_zero );

     // CP0 in cluster 0 initializes the PIC chdev,
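Note: the rewritten comment above describes how external chdevs are spread over the clusters with a modulo on a global index, each CP0 creating only the chdevs that map to its own cluster. The stand-alone C sketch below only illustrates that placement rule; the cluster and chdev counts are assumptions, the real code takes them from boot_info.

    /* Sketch of the "modulo on a global index" placement rule: every cluster
     * scans the same global list, but creates only its own chdevs. */
    #include <stdio.h>
    #include <stdint.h>

    #define NB_CLUSTERS   4        /* assumed number of clusters        */
    #define NB_EXT_CHDEV  10       /* assumed number of external chdevs */

    int main(void)
    {
        for (uint32_t cxy = 0; cxy < NB_CLUSTERS; cxy++)             /* every CP0 core       */
        {
            for (uint32_t index = 0; index < NB_EXT_CHDEV; index++)  /* same global scan     */
            {
                if ((index % NB_CLUSTERS) == cxy)                    /* local responsibility */
                    printf("cluster %u creates chdev %u\n", (unsigned)cxy, (unsigned)index);
            }
        }
        return 0;
    }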
trunk/kernel/kern/printk.c
r407 r408 453 453 } 454 454 455 ////////////////////////// 456 void puts( char * string ) 457 { 458 uint32_t save_sr; 459 uint32_t n = 0; 460 461 // compute string length 462 while ( string[n] > 0 ) n++; 463 464 // get pointers on TXT0 chdev 465 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 466 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 467 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 468 469 // get extended pointer on remote TXT0 chdev lock 470 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 471 472 // get TXT0 lock in busy waiting mode 473 remote_spinlock_lock_busy( lock_xp , &save_sr ); 474 475 // display string on TTY0 476 dev_txt_sync_write( string , n ); 477 478 // release TXT0 lock in busy waiting mode 479 remote_spinlock_unlock_busy( lock_xp , save_sr ); 480 } 481 482 483 ///////////////////////// 484 void putx( uint32_t val ) 485 { 486 static const char HexaTab[] = "0123456789ABCDEF"; 487 488 char buf[10]; 489 uint32_t c; 490 uint32_t save_sr; 491 492 buf[0] = '0'; 493 buf[1] = 'x'; 494 495 // build buffer 496 for (c = 0; c < 8; c++) 497 { 498 buf[9 - c] = HexaTab[val & 0xF]; 499 val = val >> 4; 500 } 501 502 // get pointers on TXT0 chdev 503 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 504 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 505 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 506 507 // get extended pointer on remote TXT0 chdev lock 508 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 509 510 // get TXT0 lock in busy waiting mode 511 remote_spinlock_lock_busy( lock_xp , &save_sr ); 512 513 // display string on TTY0 514 dev_txt_sync_write( buf , 10 ); 515 516 // release TXT0 lock in busy waiting mode 517 remote_spinlock_unlock_busy( lock_xp , save_sr ); 518 } 519 520 ///////////////////////// 521 void putl( uint64_t val ) 522 { 523 static const char HexaTab[] = "0123456789ABCDEF"; 524 525 char buf[18]; 526 uint32_t c; 527 uint32_t save_sr; 528 529 buf[0] = '0'; 530 buf[1] = 'x'; 531 532 // build buffer 533 for (c = 0; c < 16; c++) 534 { 535 buf[17 - c] = HexaTab[(unsigned int)val & 0xF]; 536 val = val >> 4; 537 } 538 539 // get pointers on TXT0 chdev 540 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 541 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 542 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 543 544 // get extended pointer on remote TXT0 chdev lock 545 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 546 547 // get TXT0 lock in busy waiting mode 548 remote_spinlock_lock_busy( lock_xp , &save_sr ); 549 550 // display string on TTY0 551 dev_txt_sync_write( buf , 18 ); 552 553 // release TXT0 lock in busy waiting mode 554 remote_spinlock_unlock_busy( lock_xp , save_sr ); 555 } 556 455 557 456 558 // Local Variables: -
trunk/kernel/kern/printk.h
r407 → r408

@@ -90 +90 @@

     /**********************************************************************************
-    * This function displays a "PANIC" message and forces the calling core in
-    * sleeping mode if a Boolean condition is false.
-    * These functions are actually used to debug the kernel...
+    * This function displays a formated message on kernel TXT0 terminal,
+    * and forces the calling core in sleeping mode if a Boolean condition is false.
+    * This function is actually used to debug the kernel...
     **********************************************************************************
     * @ condition     : condition that must be true.
     [...]
                  const char * function_name,
                  char       * format , ... );
+
+    /**********************************************************************************
+    * This function displays a non-formated message on kernel TXT0 terminal.
+    * This function is actually used to debug the assembly level kernel functions.
+    **********************************************************************************
+    * @ string   : non-formatted string.
+    *********************************************************************************/
+    void puts( char * string );
+
+    /**********************************************************************************
+    * This function displays a 32 bits value in hexadecimal on kernel TXT0 terminal.
+    * This function is actually used to debug the assembly level kernel functions.
+    **********************************************************************************
+    * @ val   : 32 bits unsigned value.
+    *********************************************************************************/
+    void putx( uint32_t val );
+
+    /**********************************************************************************
+    * This function displays a 64 bits value in hexadecimal on kernel TXT0 terminal.
+    * This function is actually used to debug the assembly level kernel functions.
+    **********************************************************************************
+    * @ val   : 64 bits unsigned value.
+    *********************************************************************************/
+    void putl( uint64_t val );
+

     #define panic(fmt, ...) _panic("\n[PANIC] %s(): " fmt "\n", __func__, ##__VA_ARGS__)
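Note: putx() and putl(), declared above and implemented in printk.c earlier in this changeset, build a fixed-width hexadecimal image of the value in a small buffer before writing it synchronously to TXT0. The user-space C sketch below reproduces that buffer-building logic, writing to stdout instead of the TXT0 terminal.

    /* User-space sketch of the putx() formatting: "0x" prefix followed by 8 nibbles
     * from a lookup table, least-significant nibble written last. */
    #include <stdio.h>
    #include <stdint.h>

    static void demo_putx(uint32_t val)
    {
        static const char HexaTab[] = "0123456789ABCDEF";
        char buf[10];
        buf[0] = '0';
        buf[1] = 'x';
        for (int c = 0; c < 8; c++)              /* 8 nibbles for a 32-bit value */
        {
            buf[9 - c] = HexaTab[val & 0xF];     /* fill the buffer from the right */
            val >>= 4;
        }
        fwrite(buf, 1, sizeof(buf), stdout);     /* buffer is not NUL-terminated */
        fputc('\n', stdout);
    }

    int main(void)
    {
        demo_putx(0xBADCAFE);                    /* prints 0x0BADCAFE */
        return 0;
    }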
trunk/kernel/kern/process.c
r407 r408 82 82 } 83 83 84 ///////////////////////////////////////////// 85 void process_zero_init( process_t * process ) 86 { 87 // initialize PID, PPID anf PREF 88 process->pid = 0; 89 process->ppid = 0; 90 process->ref_xp = XPTR( local_cxy , process ); 91 92 // reset th_tbl[] array as empty 93 uint32_t i; 94 for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ ) 95 { 96 process->th_tbl[i] = NULL; 97 } 98 process->th_nr = 0; 99 spinlock_init( &process->th_lock ); 100 101 hal_fence(); 102 103 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 104 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 105 106 } // end process_zero_init() 107 84 108 ///////////////////////////////////////////////// 85 109 void process_reference_init( process_t * process, 86 110 pid_t pid, 87 xptr_t parent_xp ) 88 { 89 cxy_t parent_cxy; 90 process_t * parent_ptr; 91 pid_t parent_pid; 92 111 pid_t ppid, 112 xptr_t model_xp ) 113 { 114 cxy_t model_cxy; 115 process_t * model_ptr; 93 116 error_t error1; 94 117 error_t error2; … … 104 127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 105 128 106 // get parent process cluster, local pointer, and pid 107 // for all processes other than kernel process 108 if( process == &process_zero ) // kernel process 109 { 110 assert( (pid == 0) , __FUNCTION__ , "process_zero must have PID = 0\n"); 111 112 parent_cxy = 0; 113 parent_ptr = NULL; 114 parent_pid = 0; 115 } 116 else // user process 117 { 118 parent_cxy = GET_CXY( parent_xp ); 119 parent_ptr = (process_t *)GET_PTR( parent_xp ); 120 parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) ); 121 } 129 // get model process cluster and local pointer 130 model_cxy = GET_CXY( model_xp ); 131 model_ptr = (process_t *)GET_PTR( model_xp ); 122 132 123 133 // initialize PID, PPID, and REF 124 134 process->pid = pid; 125 process->ppid = p arent_pid;135 process->ppid = ppid; 126 136 process->ref_xp = XPTR( local_cxy , process ); 127 137 128 // initialize vmm, fd array and others structures for user processes. 129 // These structures are not used by the kernel process. 
130 if( pid ) 131 { 132 // initialize vmm (not for kernel) 133 vmm_init( process ); 134 135 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm initialised for process %x\n", 138 // initialize vmm 139 vmm_init( process ); 140 141 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm empty for process %x\n", 136 142 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 137 143 138 // initialize fd_array (not for kernel) 139 process_fd_init( process ); 140 141 // create stdin / stdout / stderr pseudo-files (not for kernel) 142 if( parent_pid == 0 ) // process_init 143 { 144 error1 = vfs_open( process, 145 CONFIG_INIT_STDIN, 146 O_RDONLY, 147 0, // FIXME chmod 148 &stdin_xp, 149 &stdin_id ); 150 151 error2 = vfs_open( process, 152 CONFIG_INIT_STDOUT, 153 O_WRONLY, 154 0, // FIXME chmod 155 &stdout_xp, 156 &stdout_id ); 157 158 error3 = vfs_open( process, 159 CONFIG_INIT_STDERR, 160 O_WRONLY, 161 0, // FIXME chmod 162 &stderr_xp, 163 &stderr_id ); 164 } 165 else // user process 166 { 167 error1 = vfs_open( process, 168 CONFIG_USER_STDIN, 169 O_RDONLY, 170 0, // FIXME chmod 171 &stdin_xp, 172 &stdin_id ); 173 174 error2 = vfs_open( process, 175 CONFIG_USER_STDOUT, 176 O_WRONLY, 177 0, // FIXME chmod 178 &stdout_xp, 179 &stdout_id ); 180 181 error3 = vfs_open( process, 182 CONFIG_USER_STDERR, 183 O_WRONLY, 184 0, // FIXME chmod 185 &stderr_xp, 186 &stderr_id ); 187 } 188 189 assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ , 190 "cannot open stdin/stdout/stderr pseudo files\n"); 191 192 assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ , 193 "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id ); 144 // initialize fd_array (not for kernel) 145 process_fd_init( process ); 146 147 // create stdin / stdout / stderr pseudo-files 148 if( ppid == 0 ) // process_init 149 { 150 error1 = vfs_open( process, 151 CONFIG_INIT_STDIN, 152 O_RDONLY, 153 0, // FIXME chmod 154 &stdin_xp, 155 &stdin_id ); 156 157 error2 = vfs_open( process, 158 CONFIG_INIT_STDOUT, 159 O_WRONLY, 160 0, // FIXME chmod 161 &stdout_xp, 162 &stdout_id ); 163 164 error3 = vfs_open( process, 165 CONFIG_INIT_STDERR, 166 O_WRONLY, 167 0, // FIXME chmod 168 &stderr_xp, 169 &stderr_id ); 170 } 171 else // other user process 172 { 173 error1 = vfs_open( process, 174 CONFIG_USER_STDIN, 175 O_RDONLY, 176 0, // FIXME chmod 177 &stdin_xp, 178 &stdin_id ); 179 180 error2 = vfs_open( process, 181 CONFIG_USER_STDOUT, 182 O_WRONLY, 183 0, // FIXME chmod 184 &stdout_xp, 185 &stdout_id ); 186 187 error3 = vfs_open( process, 188 CONFIG_USER_STDERR, 189 O_WRONLY, 190 0, // FIXME chmod 191 &stderr_xp, 192 &stderr_id ); 193 } 194 195 assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ , 196 "cannot open stdin/stdout/stderr pseudo files\n"); 197 198 assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ , 199 "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id ); 200 201 // initialize specific files, cwd_lock, and fd_array 202 process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 203 &model_ptr->vfs_root_xp ) ); 204 process->vfs_cwd_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 205 &model_ptr->vfs_cwd_xp ) ); 206 process->vfs_bin_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 207 &model_ptr->vfs_bin_xp ) ); 208 vfs_file_count_up( process->vfs_root_xp ); 209 vfs_file_count_up( process->vfs_cwd_xp ); 210 vfs_file_count_up( process->vfs_bin_xp ); 211 212 process_fd_remote_copy( XPTR( local_cxy 
, &process->fd_array ), 213 XPTR( model_cxy , &model_ptr->fd_array ) ); 214 215 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 194 216 195 217 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n", 196 218 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 197 219 198 199 // reset reference process files structures and cwd_lock (not for kernel) 200 process->vfs_root_xp = XPTR_NULL; 201 process->vfs_bin_xp = XPTR_NULL; 202 process->vfs_cwd_xp = XPTR_NULL; 203 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 204 205 // reset children list root (not for kernel) 206 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 207 process->children_nr = 0; 208 209 // reset semaphore / mutex / barrier / condvar list roots (nor for kernel) 210 xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); 211 xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); 212 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 213 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 214 remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) ); 215 216 // register new process in the parent children list (nor for kernel) 217 xptr_t entry = XPTR( local_cxy , &process->brothers_list ); 218 xptr_t root = XPTR( parent_cxy , &parent_ptr->children_root ); 219 xlist_add_first( root , entry ); 220 } 221 222 // reset th_tbl[] array as empty 220 // reset children list root 221 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 222 process->children_nr = 0; 223 224 // reset semaphore / mutex / barrier / condvar list roots 225 xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); 226 xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); 227 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 228 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 229 remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) ); 230 231 // register new process in the local cluster manager pref_tbl[] 232 lpid_t lpid = LPID_FROM_PID( pid ); 233 LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process ); 234 235 // register new process descriptor in local cluster manager local_list 236 cluster_process_local_link( process ); 237 238 // register new process descriptor in local cluster manager copies_list 239 cluster_process_copies_link( process ); 240 241 // reset th_tbl[] array as empty in process descriptor 223 242 uint32_t i; 224 243 for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ ) … … 228 247 process->th_nr = 0; 229 248 spinlock_init( &process->th_lock ); 230 231 // register new process descriptor in local cluster manager local_list232 cluster_process_local_link( process );233 234 // register new process descriptor in owner cluster manager copies_list235 cluster_process_copies_link( process );236 237 // initialize signal manager TODO [AG]238 249 239 250 hal_fence(); … … 370 381 uint32_t count; // thread counter 371 382 372 printk("\n @@@%s enter\n", __FUNCTION__ );383 printk("\n[@@@] %s enter\n", __FUNCTION__ ); 373 384 374 385 // get lock protecting th_tbl[] … … 390 401 } 391 402 392 printk("\n @@@%s : %d signal(s) sent\n", __FUNCTION__, count );403 printk("\n[@@@] %s : %d signal(s) sent\n", __FUNCTION__, count ); 393 404 394 405 // second loop on threads to wait acknowledge from scheduler, … … 403 414 { 404 415 405 printk("\n @@@%s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() );416 printk("\n[@@@] %s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() 
); 406 417 407 418 // poll the THREAD_SIG_KILL bit until reset 408 419 while( thread->signals & THREAD_SIG_KILL ) asm volatile( "nop" ); 409 420 410 printk("\n @@@%s exit polling\n", __FUNCTION__ );421 printk("\n[@@@] %s exit polling\n", __FUNCTION__ ); 411 422 412 423 // detach target thread from parent if attached … … 424 435 } 425 436 426 printk("\n @@@%s : %d ack(s) received\n", __FUNCTION__, count );437 printk("\n[@@@] %s : %d ack(s) received\n", __FUNCTION__, count ); 427 438 428 439 // release lock protecting th_tbl[] … … 432 443 process_destroy( process ); 433 444 434 printk("\n[ @@@] %s : core[%x,%d] exit\n",445 printk("\n[DBG] %s : core[%x,%d] exit\n", 435 446 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid ); 436 447 … … 688 699 } // process_remove_thread() 689 700 701 ///////////////////////////////////////////////////////// 702 error_t process_make_fork( xptr_t parent_process_xp, 703 xptr_t parent_thread_xp, 704 pid_t * child_pid, 705 thread_t ** child_thread ) 706 { 707 process_t * process; // local pointer on child process descriptor 708 thread_t * thread; // local pointer on child thread descriptor 709 pid_t new_pid; // process identifier for child process 710 pid_t parent_pid; // process identifier for parent process 711 xptr_t ref_xp; // extended pointer on reference process 712 error_t error; 713 714 // get cluster and local pointer for parent process 715 cxy_t parent_process_cxy = GET_CXY( parent_process_xp ); 716 process_t * parent_process_ptr = (process_t *)GET_PTR( parent_process_xp ); 717 718 // get parent process PID 719 parent_pid = hal_remote_lw( XPTR( parent_process_cxy , &parent_process_ptr->pid ) ); 720 721 // check parent process is the reference 722 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 723 assert( (parent_process_xp == ref_xp ) , __FUNCTION__ , 724 "parent process must be the reference process\n" ); 725 726 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 727 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , hal_get_cycles() ); 728 729 // allocate a process descriptor 730 process = process_alloc(); 731 if( process == NULL ) 732 { 733 printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 734 __FUNCTION__, local_cxy ); 735 return -1; 736 } 737 738 process_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n", 739 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 740 741 // allocate a child PID from local cluster 742 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &new_pid ); 743 if( (error != 0) || (new_pid == 0) ) 744 { 745 printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 746 __FUNCTION__, local_cxy ); 747 process_free( process ); 748 return -1; 749 } 750 751 process_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n", 752 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , hal_get_cycles() ); 753 754 // initializes child process descriptor from parent process descriptor 755 process_reference_init( process, 756 new_pid, 757 parent_pid, 758 parent_process_xp ); 759 760 process_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n", 761 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 762 763 // copy VMM from parent descriptor to child descriptor 764 error = vmm_fork_copy( process, 765 parent_process_xp ); 766 if( error ) 767 { 768 printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 769 __FUNCTION__, local_cxy ); 770 process_free( 
process ); 771 cluster_pid_release( new_pid ); 772 return -1; 773 } 774 775 process_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n", 776 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 777 778 // create child thread descriptor from parent thread descriptor 779 error = thread_user_fork( parent_thread_xp, 780 process, 781 &thread ); 782 if( error ) 783 { 784 printk("\n[ERROR] in %s : cannot create thread in cluster %x\n", 785 __FUNCTION__, local_cxy ); 786 process_free( process ); 787 cluster_pid_release( new_pid ); 788 return -1; 789 } 790 791 process_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 792 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 793 794 // update parent process GPT to set Copy_On_Write for shared data vsegs 795 // this includes all replicated GPT copies 796 if( parent_process_cxy == local_cxy ) // reference is local 797 { 798 vmm_set_cow( parent_process_ptr ); 799 } 800 else // reference is remote 801 { 802 rpc_vmm_set_cow_client( parent_process_cxy, 803 parent_process_ptr ); 804 } 805 806 process_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n", 807 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 808 809 // update children list in parent process 810 xlist_add_last( XPTR( parent_process_cxy , &parent_process_ptr->children_root ), 811 XPTR( local_cxy , &process->brothers_list ) ); 812 hal_remote_atomic_add( XPTR( parent_process_cxy, 813 &parent_process_ptr->children_nr), 1 ); 814 815 // vmm_display( process , true ); 816 // vmm_display( parent_process_ptr , true ); 817 // sched_display( 0 ); 818 819 // return success 820 *child_thread = thread; 821 *child_pid = new_pid; 822 823 return 0; 824 825 } // end process_make_fork() 826 690 827 ///////////////////////////////////////////////////// 691 828 error_t process_make_exec( exec_info_t * exec_info ) 692 829 { 693 char * path; // pathname to .elf file 694 bool_t keep_pid; // new process keep parent PID if true 695 process_t * process; // local pointer on new process 696 pid_t pid; // new process pid 697 xptr_t parent_xp; // extended pointer on parent process 698 cxy_t parent_cxy; // parent process local cluster 699 process_t * parent_ptr; // local pointer on parent process 700 uint32_t parent_pid; // parent process identifier 701 thread_t * thread; // pointer on new thread 702 pthread_attr_t attr; // main thread attributes 703 core_t * core; // pointer on selected core 704 lid_t lid; // selected core local index 830 char * path; // pathname to .elf file 831 process_t * old; // local pointer on old process 832 process_t * new; // local pointer on new process 833 pid_t pid; // old process identifier 834 thread_t * thread; // pointer on new thread 835 pthread_attr_t attr; // main thread attributes 836 lid_t lid; // selected core local index 705 837 error_t error; 706 838 707 // get .elf pathname, parent_xp, and keep_pid flag from exec_info 708 path = exec_info->path; 709 parent_xp = exec_info->parent_xp; 710 keep_pid = exec_info->keep_pid; 711 712 process_dmsg("\n[DBG] %s : core[%x,%d] enters for path = %s\n", 713 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path ); 714 715 // get parent process cluster and local pointer 716 parent_cxy = GET_CXY( parent_xp ); 717 parent_ptr = (process_t *)GET_PTR( parent_xp ); 718 parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) ); 719 720 // allocates memory for process descriptor 721 process = process_alloc(); 722 if( process 
== NULL ) return -1; 723 724 // get PID 725 if( keep_pid ) // keep parent PID 726 { 727 pid = parent_pid; 728 } 729 else // get new PID from local cluster 730 { 731 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ); 732 if( error ) return -1; 733 } 734 735 process_dmsg("\n[DBG] %s : core[%x,%d] created process %x for path = %s\n", 839 // get .elf pathname and PID from exec_info 840 path = exec_info->path; 841 pid = exec_info->pid; 842 843 // check local cluster is old process owner 844 assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__, 845 "local cluster %x is not owner for process %x\n", local_cxy, pid ); 846 847 exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n", 848 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , path ); 849 850 // get old process local pointer 851 old = (process_t *)cluster_get_local_process_from_pid( pid ); 852 853 assert( (old != NULL ) , __FUNCTION__ , 854 "process %x not found in cluster %x\n", pid , local_cxy ); 855 856 // allocate memory for new process descriptor 857 new = process_alloc(); 858 859 // initialize new process descriptor 860 process_reference_init( new, 861 old->pid, // same as old 862 old->ppid, // same as old 863 XPTR( local_cxy , old ) ); 864 865 exec_dmsg("\n[DBG] %s : core[%x,%d] created new process %x / path = %s\n", 736 866 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 737 867 738 // initialize the process descriptor as the reference 739 process_reference_init( process , pid , parent_xp ); 740 741 process_dmsg("\n[DBG] %s : core[%x,%d] initialized process %x / path = %s\n", 742 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 743 744 // initialize vfs_root and vfs_cwd from parent process 745 xptr_t vfs_root_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_root_xp ) ); 746 vfs_file_count_up( vfs_root_xp ); 747 process->vfs_root_xp = vfs_root_xp; 748 749 xptr_t vfs_cwd_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_cwd_xp ) ); 750 vfs_file_count_up( vfs_cwd_xp ); 751 process->vfs_cwd_xp = vfs_cwd_xp; 752 753 // initialize embedded fd_array from parent process 754 process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ), 755 XPTR( parent_cxy , &parent_ptr->fd_array) ); 756 757 process_dmsg("\n[DBG] %s : core[%x,%d] copied fd_array for process %x\n", 758 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid ); 759 760 // register "code" and "data" vsegs as well as the process entry-point in VMM, 761 // using information contained in the elf file. 762 error = elf_load_process( path , process ); 763 764 if( error ) 868 // register "code" and "data" vsegs as well as entry-point 869 // in new process VMM, using information contained in the elf file. 
870 if( elf_load_process( path , new ) ) 765 871 { 766 872 printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n", 767 768 process_destroy( process);769 return error;873 __FUNCTION__, pid , path ); 874 process_destroy( new ); 875 return -1; 770 876 } 771 877 772 process_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs forprocess %x / path = %s\n",878 exec_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs / process %x / path = %s\n", 773 879 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 774 880 775 // select a core in cluster881 // select a core in local cluster to execute the main thread 776 882 lid = cluster_select_local_core(); 777 core = &LOCAL_CLUSTER->core_tbl[lid];778 883 779 884 // initialize pthread attributes for main thread … … 784 889 // create and initialize thread descriptor 785 890 error = thread_user_create( pid, 786 (void *) process->vmm.entry_point,891 (void *)new->vmm.entry_point, 787 892 exec_info->args_pointers, 788 893 &attr, … … 792 897 printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n", 793 898 __FUNCTION__, pid , path ); 794 process_destroy( process);795 return error;899 process_destroy( new ); 900 return -1; 796 901 } 797 902 798 process_dmsg("\n[DBG] %s : core[%x,%d] created thread %x for process %x / path = %s\n", 799 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid, path ); 800 801 // update children list in parent process 802 xlist_add_last( XPTR( parent_cxy , &parent_ptr->children_root ), 803 XPTR( local_cxy , &process->brothers_list ) ); 804 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr) , 1 ); 903 exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x for new process %x\n", 904 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid ); 905 906 // update children list (rooted in parent process) 907 xlist_replace( XPTR( local_cxy , &old->brothers_list ) , 908 XPTR( local_cxy , &new->brothers_list ) ); 909 910 // FIXME request destruction of old process copies and threads in all clusters 805 911 806 912 // activate new thread 807 913 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL ); 808 914 809 process_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",915 exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n", 810 916 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path ); 811 917 … … 818 924 { 819 925 exec_info_t exec_info; // structure to be passed to process_make_exec() 820 xptr_t parent_xp; // extended pointer on parent process. 
926 process_t * process; // local pointer on process_init descriptor 927 pid_t pid; // process_init identifier 821 928 error_t error; 822 929 … … 824 931 __FUNCTION__ , local_cxy ); 825 932 826 // parent process is local kernel process 827 parent_xp = XPTR( local_cxy , &process_zero ); 933 // allocates memory for process descriptor from local cluster 934 process = process_alloc(); 935 if( process == NULL ) 936 { 937 printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n", 938 __FUNCTION__, local_cxy ); 939 } 940 941 // get new PID from local cluster 942 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ); 943 if( error ) 944 { 945 printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n", 946 __FUNCTION__, local_cxy ); 947 } 948 949 // initialise the process desciptor (parent is local kernel process) 950 process_reference_init( process, 951 pid, 952 process_zero.pid, 953 XPTR( local_cxy , &process_zero ) ); 828 954 829 955 // initialize the exec_info structure 830 exec_info.keep_pid = false; 831 exec_info.parent_xp = parent_xp; 832 strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH ); 956 exec_info.pid = pid; 833 957 exec_info.args_nr = 0; 834 958 exec_info.envs_nr = 0; 835 836 // initialize process_init and create thread_init 959 strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH ); 960 961 // update process descriptor and create thread descriptor 837 962 error = process_make_exec( &exec_info ); 838 963 839 if( error ) panic("cannot initialize process_init in cluster %x", local_cxy ); 964 if( error ) 965 { 966 printk("\n[PANIC] in %s : cannot exec %s in cluster %x\n", 967 __FUNCTION__, CONFIG_PROCESS_INIT_PATH , local_cxy ); 968 } 840 969 841 970 process_dmsg("\n[DBG] %s : exit in cluster %x\n", -
trunk/kernel/kern/process.h
r407 r408 137 137 /********************************************************************************************* 138 138 * This structure defines the information required by the process_make_exec() function 139 * to create a new reference process descriptor, and the associated main thread. 139 * to create a new reference process descriptor, and the associated main thread, 140 * in the parent process owner cluster. 140 141 ********************************************************************************************/ 141 142 142 143 typedef struct exec_info_s 143 144 { 144 xptr_t parent_xp; /*! extended pointer on parent process descriptor */ 145 bool_t keep_pid; /*! keep parent PID if true / new PID if false */ 145 pid_t pid; /*! process identifier (both parent and child) */ 146 146 147 147 char path[CONFIG_VFS_MAX_PATH_LENGTH]; /*! .elf file path */ … … 187 187 188 188 /********************************************************************************************* 189 * This function initializes a new process descriptor, in the reference cluster. 190 * The PID value must have been defined previously by the owner cluster manager. 191 * The reference cluster can be different from the owner cluster. 192 * It set the pid / ppid / ref_xp fields. 193 * It registers this process descriptor in three lists: 194 * - the children_list in the parent reference process descriptor. 195 * - the local_list, rooted in the reference cluster manager. 196 * - the copies_list, rooted in the owner cluster manager. 197 * It resets the embedded structures such as the VMM or the file descriptor array. 198 ********************************************************************************************* 199 * @ process : [in] pointer on process descriptor to initialize. 200 * @ pid : [in] process identifier defined by owner cluster. 201 * @ parent_xp : [in] extended pointer on parent process. 189 * This function initialize, in each cluster, the kernel "process_zero", that is the owner 190 * of all kernel threads in a given cluster. It is called by the kernel_init() function. 191 * Both the PID and PPID fields are set to zero, and the ref_xp is the local process_zero. 192 * The th_tbl[] is initialized as empty. 193 ********************************************************************************************* 194 * @ process : [in] pointer on local process descriptor to initialize. 195 ********************************************************************************************/ 196 void process_zero_init( process_t * process ); 197 198 /********************************************************************************************* 199 * This function initializes a local, reference user process descriptor from another process 200 * descriptor, defined by the <model_xp> argument. The <process> descriptor, the <pid>, and 201 * the <ppid> arguments must be previously defined by the caller. 202 * It can be called by three functions, depending on the process type: 203 * 1) if "process" is the user "process_init", the parent is the kernel process. It is 204 * called once, by the process_init_create() function in cluster[xmax-1][ymax-1]. 205 * 2) if the caller is the process_make_fork() function, the model is generally a remote 206 * process, that is also the parent process. 207 * 3) if the caller is the process_make_exec() function, the model is always a local process, 208 * but the parent is the parent of the model process. 209 * 210 * The following fields are initialised (for all process but process_zero). 
211 * - It set the pid / ppid / ref_xp fields. 212 * - It initializes an empty VMM (no vsegs registered in VSL and GPT). 213 * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR. 214 * - It set the root_xp, bin_xp, cwd_xp fields. 215 * - It reset the children list as empty, but does NOT register it in parent children list. 216 * - It reset the TH_TBL list of threads as empty. 217 * - It reset the semaphore / mutex / barrier / condvar lists as empty. 218 * - It registers the process in the local_list, rooted in the local cluster manager. 219 * - It registers the process in the copies_list, rooted in the owner cluster manager. 220 * - It registers the process extended pointer in the local pref_tbl[] array. 221 ********************************************************************************************* 222 * @ process : [in] pointer on local process descriptor to initialize. 223 * @ pid : [in] process identifier. 224 * @ ppid : [in] parent process identifier. 225 * @ model_xp : [in] extended pointer on model process descriptor (local or remote). 202 226 ********************************************************************************************/ 203 227 void process_reference_init( process_t * process, 204 228 pid_t pid, 205 xptr_t parent_xp ); 229 pid_t ppid, 230 xptr_t model_xp ); 206 231 207 232 /********************************************************************************************* … … 249 274 250 275 /********************************************************************************************* 251 * This function allocates memory and initializes a new user process descriptor, 252 * and the associated main thread, from information found in the <exec_info> structure 253 * (defined in the process.h file), that must be built by the caller. 254 * - If the <keep_pid> field is true, the new process inherits its PID from the parent PID. 255 * - If the <keep_pid> field is false, a new PID is allocated from the local cluster manager. 256 * The new process inherits from the parent process (i) the open file descriptors, (ii) the 257 * vfs_root and the vfs_cwd inodes. 258 * It accesses the .elf file to get the size of the code and data segments, and initializes 259 * the vsegs list in the VMM. 260 * It is executed in the local cluster, that becomes both "owner" and "reference". 261 * - It can be called by the process_init_create() function to build the "init" process. 262 * - It can be called directly by the sys_exec() function in case of local exec. 263 * - It can be called through the rpc_process_exec_server() function in case of remote exec. 276 * This function implements the exec() system call, and is called by the sys_exec() function. 277 * It is also called by the process_init_create() function to build the "init" process. 278 * The "new" process keep the "old" process PID and PPID, all open files, and env variables, 279 * the vfs_root and vfs_cwd, but build a brand new memory image (new VMM from the new .elf). 280 * It actually creates a "new" reference process descriptor, saves all relevant information 281 * from the "old" reference process descriptor to the "new" process descriptor. 282 * It completes the "new" process descriptor, from information found in the <exec_info> 283 * structure (defined in the process.h file), that must be built by the caller. 284 * It creates and initializes the associated main thread. It finally destroys all copies 285 * of the "old" process in all clusters, and all the old associated threads. 
286 * It is executed in the local cluster, that becomes both the "owner" and the "reference" 287 * cluster for the "new" process. 264 288 ********************************************************************************************* 265 289 * @ exec_info : [in] pointer on the exec_info structure. … … 268 292 error_t process_make_exec( exec_info_t * exec_info ); 269 293 294 /********************************************************************************************* 295 * This function implement the fork() system call, and is called by the sys_fork() function. 296 * It allocates memory and initializes a new "child" process descriptor, and the 297 * associated "child" thread descriptor in the local cluster. This function can involve 298 * up to three different clusters : 299 * - the local (child) cluster can be any cluster defined by the sys_fork function. 300 * - the parent cluster must be the reference clusterfor the parent process. 301 * - the client cluster containing the thread requestingthe fork can be any cluster. 302 * The new "child" process descriptor is initialised from informations found in the "parent" 303 * reference process descriptor, containing the complete process description. 304 * The new "child" thread descriptor is initialised from informations found in the "parent" 305 * thread descriptor. 306 ********************************************************************************************* 307 * @ parent_process_xp : extended pointer on the reference parent process. 308 * @ parent_thread_xp : extended pointer on the parent thread requesting the fork. 309 * @ child_pid : [out] child process identifier. 310 * @ child_thread_ptr : [out] local pointer on child thread in target cluster. 311 * @ return 0 if success / return non-zero if error. 312 ********************************************************************************************/ 313 error_t process_make_fork( xptr_t parent_process_xp, 314 xptr_t parent_thread_xp, 315 pid_t * child_pid, 316 struct thread_s ** child_thread_ptr ); 270 317 271 318 /******************** File Management Operations ****************************************/ -
trunk/kernel/kern/rpc.c
r407 r408 49 49 { 50 50 &rpc_pmem_get_pages_server, // 0 51 &rpc_process_ pid_alloc_server, // 152 &rpc_process_ exec_server,// 251 &rpc_process_make_exec_server, // 1 52 &rpc_process_make_fork_server, // 2 53 53 &rpc_process_kill_server, // 3 54 54 &rpc_thread_user_create_server, // 4 … … 78 78 &rpc_vmm_create_vseg_server, // 26 79 79 &rpc_sched_display_server, // 27 80 &rpc_ undefined,// 2880 &rpc_vmm_set_cow_server, // 28 81 81 &rpc_undefined, // 29 82 82 }; … … 148 148 149 149 ///////////////////////////////////////////////////////////////////////////////////////// 150 // [1] Marshaling functions attached to RPC_PROCESS_PID_ALLOC 151 ///////////////////////////////////////////////////////////////////////////////////////// 152 153 ////////////////////////////////////////////////// 154 void rpc_process_pid_alloc_client( cxy_t cxy, 155 process_t * process, // in 156 error_t * error, // out 157 pid_t * pid ) // out 158 { 159 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 160 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 161 CURRENT_THREAD->core->lid , hal_time_stamp() ); 162 163 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 164 165 // initialise RPC descriptor header 166 rpc_desc_t rpc; 167 rpc.index = RPC_PROCESS_PID_ALLOC; 168 rpc.response = 1; 169 170 // set input arguments in RPC descriptor 171 rpc.args[0] = (uint64_t)(intptr_t)process; 172 173 // register RPC request in remote RPC fifo (blocking function) 174 rpc_send_sync( cxy , &rpc ); 175 176 // get output arguments RPC descriptor 177 *pid = (pid_t)rpc.args[1]; 178 *error = (error_t)rpc.args[2]; 179 180 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 181 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 182 CURRENT_THREAD->core->lid , hal_time_stamp() ); 183 } 184 185 ////////////////////////////////////////////// 186 void rpc_process_pid_alloc_server( xptr_t xp ) 187 { 188 process_t * process; // input : client process descriptor 189 error_t error; // output : error status 190 pid_t pid; // output : process identifier 191 192 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 193 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 194 CURRENT_THREAD->core->lid , hal_time_stamp() ); 195 196 // get client cluster identifier and pointer on RPC descriptor 197 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 198 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 199 200 // get input argument from client RPC descriptor 201 process = (process_t*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 202 203 // call local pid allocator 204 xptr_t xp_process = XPTR( client_cxy , process ); 205 error = cluster_pid_alloc( xp_process , &pid ); 206 207 // set output arguments into client RPC descriptor 208 hal_remote_sw( XPTR( client_cxy , &desc->args[0] ) , (uint64_t)error ); 209 hal_remote_sw( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)pid ); 210 211 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 212 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 213 CURRENT_THREAD->core->lid , hal_time_stamp() ); 214 } 215 216 217 ///////////////////////////////////////////////////////////////////////////////////////// 218 // [2] Marshaling functions attached to RPC_PROCESS_EXEC 219 ///////////////////////////////////////////////////////////////////////////////////////// 220 221 //////////////////////////////////////////////// 222 void rpc_process_exec_client( cxy_t cxy, 223 exec_info_t * info, // in 224 error_t * error ) // 
out 225 { 226 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 227 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 228 CURRENT_THREAD->core->lid , hal_time_stamp() ); 229 230 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 231 232 // initialise RPC descriptor header 233 rpc_desc_t rpc; 234 rpc.index = RPC_PROCESS_EXEC; 150 // [1] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC 151 ///////////////////////////////////////////////////////////////////////////////////////// 152 153 ///////////////////////////////////////////////////// 154 void rpc_process_make_exec_client( cxy_t cxy, 155 exec_info_t * info, // in 156 error_t * error ) // out 157 { 158 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 159 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 160 CURRENT_THREAD->core->lid , hal_time_stamp() ); 161 162 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 163 164 // initialise RPC descriptor header 165 rpc_desc_t rpc; 166 rpc.index = RPC_PROCESS_MAKE_EXEC; 235 167 rpc.response = 1; 236 168 … … 249 181 } 250 182 251 ///////////////////////////////////////// 252 void rpc_process_ exec_server( xptr_t xp )183 ////////////////////////////////////////////// 184 void rpc_process_make_exec_server( xptr_t xp ) 253 185 { 254 186 exec_info_t * ptr; // local pointer on remote exec_info structure … … 283 215 } 284 216 217 ///////////////////////////////////////////////////////////////////////////////////////// 218 // [2] Marshaling functions attached to RPC_PROCESS_MAKE_FORK 219 ///////////////////////////////////////////////////////////////////////////////////////// 220 221 /////////////////////////////////////////////////// 222 void rpc_process_make_fork_client( cxy_t cxy, 223 xptr_t ref_process_xp, // in 224 xptr_t parent_thread_xp, // in 225 pid_t * child_pid, // out 226 thread_t ** child_thread_ptr, // out 227 error_t * error ) // out 228 { 229 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 230 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 231 CURRENT_THREAD->core->lid , hal_time_stamp() ); 232 233 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 234 235 // initialise RPC descriptor header 236 rpc_desc_t rpc; 237 rpc.index = RPC_PROCESS_MAKE_FORK; 238 rpc.response = 1; 239 240 // set input arguments in RPC descriptor 241 rpc.args[0] = (uint64_t)(intptr_t)ref_process_xp; 242 rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp; 243 244 // register RPC request in remote RPC fifo (blocking function) 245 rpc_send_sync( cxy , &rpc ); 246 247 // get output arguments from RPC descriptor 248 *child_pid = (pid_t)rpc.args[2]; 249 *child_thread_ptr = (thread_t *)(intptr_t)rpc.args[3]; 250 *error = (error_t)rpc.args[4]; 251 252 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 253 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 254 CURRENT_THREAD->core->lid , hal_time_stamp() ); 255 } 256 257 ////////////////////////////////////////////// 258 void rpc_process_make_fork_server( xptr_t xp ) 259 { 260 xptr_t ref_process_xp; // extended pointer on reference parent process 261 xptr_t parent_thread_xp; // extended pointer on parent thread 262 pid_t child_pid; // child process identifier 263 thread_t * child_thread_ptr; // local copy of exec_info structure 264 error_t error; // local error status 265 266 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 267 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 
268 CURRENT_THREAD->core->lid , hal_time_stamp() ); 269 270 // get client cluster identifier and pointer on RPC descriptor 271 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 272 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 273 274 // get input arguments from cient RPC descriptor 275 ref_process_xp = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 276 parent_thread_xp = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 277 278 // call local kernel function 279 error = process_make_fork( ref_process_xp, 280 parent_thread_xp, 281 &child_pid, 282 &child_thread_ptr ); 283 284 // set output argument into client RPC descriptor 285 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid ); 286 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr ); 287 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 288 289 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 290 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 291 CURRENT_THREAD->core->lid , hal_time_stamp() ); 292 } 285 293 286 294 ///////////////////////////////////////////////////////////////////////////////////////// … … 1800 1808 } 1801 1809 1810 ///////////////////////////////////////////////////////////////////////////////////////// 1811 // [28] Marshaling functions attached to RPC_VMM_SET_COW 1812 ///////////////////////////////////////////////////////////////////////////////////////// 1813 1814 ///////////////////////////////////////////// 1815 void rpc_vmm_set_cow_client( cxy_t cxy, 1816 process_t * process ) 1817 { 1818 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1819 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1820 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1821 1822 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1823 1824 // initialise RPC descriptor header 1825 rpc_desc_t rpc; 1826 rpc.index = RPC_VMM_SET_COW; 1827 rpc.response = 1; 1828 1829 // set input arguments in RPC descriptor 1830 rpc.args[0] = (uint64_t)(intptr_t)process; 1831 1832 // register RPC request in remote RPC fifo (blocking function) 1833 rpc_send_sync( cxy , &rpc ); 1834 1835 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1836 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1837 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1838 } 1839 1840 //////////////////////////////////////// 1841 void rpc_vmm_set_cow_server( xptr_t xp ) 1842 { 1843 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1844 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1845 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1846 1847 process_t * process; 1848 1849 // get client cluster identifier and pointer on RPC descriptor 1850 cxy_t cxy = (cxy_t)GET_CXY( xp ); 1851 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 1852 1853 // get input arguments from client RPC descriptor 1854 process = (process_t *)(intptr_t)hal_remote_lpt( XPTR(cxy , &desc->args[0])); 1855 1856 // call local kernel function 1857 vmm_set_cow( process ); 1858 1859 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1860 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1861 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1862 } 1863 1802 1864 /***************************************************************************************/ 1803 1865 /************ Generic functions supporting RPCs : client side **************************/ … … 1835 1897 __FUNCTION__ , local_cxy , 
server_cxy ); 1836 1898 1837 if( thread_can_yield() ) sched_yield( );1899 if( thread_can_yield() ) sched_yield("RPC fifo full"); 1838 1900 } 1839 1901 } … … 1872 1934 1873 1935 thread_block( this , THREAD_BLOCKED_RPC ); 1874 sched_yield( );1936 sched_yield("client blocked on RPC"); 1875 1937 1876 1938 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n", … … 1959 2021 1960 2022 // interrupted thread deschedule always 1961 sched_yield( );2023 sched_yield("IPI received"); 1962 2024 1963 2025 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n", … … 2079 2141 2080 2142 // deschedule without blocking 2081 sched_yield( );2143 sched_yield("RPC fifo empty or too much work"); 2082 2144 2083 2145 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n", … … 2089 2151 2090 2152 2091 2092 2093 2094 2095 2096 2097 2098 /* deprecated [AG] 29/09/20172099 2100 ////////////////////////////////////////////////////2101 error_t rpc_activate_thread( remote_fifo_t * rpc_fifo )2102 {2103 core_t * core;2104 thread_t * thread;2105 thread_t * this;2106 scheduler_t * sched;2107 error_t error;2108 bool_t found;2109 reg_t sr_save;2110 2111 2112 this = CURRENT_THREAD;2113 core = this->core;2114 sched = &core->scheduler;2115 found = false;2116 2117 assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,2118 "calling thread is not RPC_FIFO owner\n" );2119 2120 // makes the calling thread not preemptable2121 // during activation / creation of the RPC thread2122 hal_disable_irq( &sr_save );2123 2124 grpc_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",2125 __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );2126 2127 // search one non blocked RPC thread2128 list_entry_t * iter;2129 LIST_FOREACH( &sched->k_root , iter )2130 {2131 thread = LIST_ELEMENT( iter , thread_t , sched_list );2132 if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )2133 {2134 found = true;2135 break;2136 }2137 }2138 2139 if( found == false ) // create new RPC thread2140 {2141 error = thread_kernel_create( &thread,2142 THREAD_RPC,2143 &rpc_thread_func,2144 NULL,2145 core->lid );2146 if( error )2147 {2148 hal_restore_irq( sr_save );2149 printk("\n[ERROR] in %s : no memory for new RPC thread in cluster %x\n",2150 __FUNCTION__ , local_cxy );2151 return ENOMEM;2152 }2153 2154 // unblock thread2155 thread->blocked = 0;2156 2157 // update core descriptor counter2158 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );2159 2160 grpc_dmsg("\n[DBG] %s : core [%x,%d] creates RPC thread %x at cycle %d\n",2161 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );2162 2163 }2164 else // create a new RPC thread2165 {2166 2167 grpc_dmsg("\n[DBG] %s : core[%x,%d] activates RPC thread %x at cycle %d\n",2168 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );2169 2170 }2171 2172 // update rpc_fifo owner2173 rpc_fifo->owner = thread->trdid;2174 2175 // current thread deschedule2176 sched_yield();2177 2178 // restore IRQs for the calling thread2179 hal_restore_irq( sr_save );2180 2181 // return success2182 return 0;2183 2184 } // end rpc_activate_thread()2185 2186 ////////////////2187 void rpc_check()2188 {2189 thread_t * this = CURRENT_THREAD;2190 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;2191 error_t error;2192 2193 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / enter at cycle %d\n",2194 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2195 2196 // calling thread does nothing if light lock 
already taken or FIFO empty2197 if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) )2198 {2199 2200 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",2201 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2202 2203 return;2204 }2205 2206 // try to take the light lock, and activates an RPC thread if success2207 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )2208 {2209 error = rpc_activate_thread( rpc_fifo );2210 2211 if( error ) // cannot activate an RPC_THREAD2212 {2213 rpc_fifo->owner = 0;2214 2215 printk("\n[ERROR] in %s : no memory to create a RPC thread for core %d"2216 " in cluster %x => do nothing\n",2217 __FUNCTION__ , CURRENT_CORE->lid , local_cxy );2218 }2219 2220 return;2221 }2222 else // light lock taken by another thread2223 {2224 2225 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",2226 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2227 2228 return;2229 }2230 } // end rpc_check()2231 2232 2233 //////////////////////2234 void rpc_thread_func()2235 {2236 // makes the RPC thread not preemptable2237 hal_disable_irq( NULL );2238 2239 thread_t * this = CURRENT_THREAD;2240 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;2241 2242 while(1)2243 {2244 // check fifo ownership (ownership should be given by rpc_activate()2245 assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,2246 "thread %x on core[%x,%d] not owner of RPC_FIFO / owner = %x\n",2247 this->trdid, local_cxy, this->core->lid , rpc_fifo->owner );2248 2249 // executes pending RPC(s)2250 rpc_execute_all( rpc_fifo );2251 2252 // release rpc_fifo ownership if required2253 // (this ownership can be lost during RPC execution)2254 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;2255 2256 // deschedule or sucide2257 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) // suicide2258 {2259 2260 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / suicide at cycle %d\n",2261 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2262 2263 // update core descriptor counter2264 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );2265 2266 // suicide2267 thread_exit();2268 }2269 else // deschedule2270 {2271 2272 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / deschedule at cycle %d\n",2273 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2274 2275 sched_yield();2276 2277 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / wake up at cycle %d\n",2278 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2279 2280 }2281 } // end while2282 } // end rpc_thread_func()2283 2284 */2285 2286 -
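All marshaling pairs in rpc.c follow the same pattern as the functions shown above: the client fills an rpc_desc_t (index, expected response count, input arguments packed as uint64_t in args[]), calls the blocking rpc_send_sync(), then unpacks the outputs; the server reads the inputs from the client-side descriptor with hal_remote_lwd(), calls the local kernel service, and writes the results back with hal_remote_swd(). A minimal sketch of that pattern, assuming rpc.c's usual includes and a purely hypothetical RPC_FOO index served by a hypothetical foo() kernel function:

void rpc_foo_client( cxy_t      cxy,        // server cluster identifier
                     uint32_t   in_val,     // in
                     uint32_t * out_val,    // out
                     error_t  * error )     // out
{
    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t rpc;
    rpc.index    = RPC_FOO;                 // hypothetical index
    rpc.response = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)in_val;

    // register RPC request in remote RPC fifo (blocking function)
    rpc_send_sync( cxy , &rpc );

    // get output arguments from RPC descriptor
    *out_val = (uint32_t)rpc.args[1];
    *error   = (error_t)rpc.args[2];
}

void rpc_foo_server( xptr_t xp )
{
    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );

    // get input argument from client RPC descriptor
    uint32_t in_val = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );

    // call local kernel service (hypothetical)
    uint32_t out_val;
    error_t  error = foo( in_val , &out_val );

    // set output arguments into client RPC descriptor
    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)out_val );
    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error );
}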
trunk/kernel/kern/rpc.h
r407 r408 60 60 { 61 61 RPC_PMEM_GET_PAGES = 0, 62 RPC_PROCESS_ PID_ALLOC = 1,63 RPC_PROCESS_ EXEC= 2,62 RPC_PROCESS_MAKE_EXEC = 1, 63 RPC_PROCESS_MAKE_FORK = 2, 64 64 RPC_PROCESS_KILL = 3, 65 65 RPC_THREAD_USER_CREATE = 4, … … 85 85 RPC_VMM_CREATE_VSEG = 26, 86 86 RPC_SCHED_DISPLAY = 27, 87 87 RPC_VMM_SET_COW = 28, 88 88 RPC_MAX_INDEX = 30, 89 89 } … … 186 186 187 187 /*********************************************************************************** 188 * [1] The RPC_PROCESS_PID_ALLOC allocates one new PID in a remote cluster, registers 189 * the new process in the remote cluster, and returns the PID, and an error code. 188 * [1] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 189 * process descriptor in a remote server cluster. This server cluster must be 190 * the owner cluster for the existing process. The new process descriptor is 191 * initialized from informations found in the <exec_info> structure. 192 * A new main thread descriptor is created in the server cluster. 193 * All copies of the old process descriptor and all old threads are destroyed. 190 194 *********************************************************************************** 191 195 * @ cxy : server cluster identifier. 192 * @ process : [in] local pointer on process descriptorin client cluster.196 * @ process : [in] local pointer on the exec_info structure in client cluster. 193 197 * @ error : [out] error status (0 if success). 194 * @ pid : [out] new process identifier. 195 **********************************************************************************/ 196 void rpc_process_pid_alloc_client( cxy_t cxy, 197 struct process_s * process, 198 error_t * error, 199 pid_t * pid ); 200 201 void rpc_process_pid_alloc_server( xptr_t xp ); 202 203 /*********************************************************************************** 204 * [2] The RPC_PROCESS_EXEC creates a process descriptor copy, in a remote cluster 205 * and initializes if from information found in the reference process descriptor. 206 * This remote cluster becomes the new reference cluster. 207 *********************************************************************************** 208 * @ cxy : server cluster identifier. 209 * @ info : [in] pointer on local exec_info structure. 210 * @ error : [out] error status (0 if success). 211 **********************************************************************************/ 212 void rpc_process_exec_client( cxy_t cxy, 213 struct exec_info_s * info, 214 error_t * error ); 215 216 void rpc_process_exec_server( xptr_t xp ); 198 **********************************************************************************/ 199 void rpc_process_make_exec_client( cxy_t cxy, 200 struct exec_info_s * info, 201 error_t * error ); 202 203 void rpc_process_make_exec_server( xptr_t xp ); 204 205 /*********************************************************************************** 206 * [2] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the 207 * associated "child" thread descriptor in a target remote cluster that can be 208 * any cluster. The child process is initialized from informations found in the 209 * "parent" process descriptor (that must be the parent reference cluster), 210 * and from the "parent" thread descriptor that can be in any cluster. 211 *********************************************************************************** 212 * @ cxy : server cluster identifier. 213 * @ ref_process_xp : [in] extended pointer on reference parent process. 
214 * @ parent_thread_xp : [in] extended pointer on parent thread. 215 * @ child_pid : [out] child process identifier. 216 * @ child_thread_ptr : [out] local pointer on child thread. 217 * @ error : [out] error status (0 if success). 218 **********************************************************************************/ 219 void rpc_process_make_fork_client( cxy_t cxy, 220 xptr_t ref_process_xp, 221 xptr_t parent_thread_xp, 222 pid_t * child_pid, 223 struct thread_s ** child_thread_ptr, 224 error_t * error ); 225 226 void rpc_process_make_fork_server( xptr_t xp ); 217 227 218 228 /*********************************************************************************** … … 613 623 void rpc_sched_display_server( xptr_t xp ); 614 624 625 /*********************************************************************************** 626 * [28] The RPC_VMM_SET_COW allows a client thread to request the remote reference 627 * cluster to set the COW flag and reset the WRITABLE flag of all GPT entries for 628 * the DATA, MMAP and REMOTE vsegs of process identified by the <process> argument. 629 630 * of a remote scheduler, identified by the <lid> argument. 631 *********************************************************************************** 632 * @ cxy : server cluster identifier. 633 * @ process : [in] local pointer on reference process descriptor. 634 **********************************************************************************/ 635 void rpc_vmm_set_cow_client( cxy_t cxy, 636 struct process_s * process ); 637 638 void rpc_vmm_set_cow_server( xptr_t xp ); 639 615 640 #endif -
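The new fork RPC above is the remote counterpart of process_make_fork(). A hedged usage sketch of how a caller such as sys_fork() might dispatch the request, calling the kernel function directly when the selected target cluster is the local one and delegating through the RPC otherwise (the target_cxy, ref_process_xp and parent_thread_xp variables are illustrative):

error_t    error;
pid_t      child_pid;
thread_t * child_thread_ptr;

if( target_cxy == local_cxy )           // child created in the local cluster
{
    error = process_make_fork( ref_process_xp,
                               parent_thread_xp,
                               &child_pid,
                               &child_thread_ptr );
}
else                                    // child created in a remote cluster
{
    rpc_process_make_fork_client( target_cxy,
                                  ref_process_xp,
                                  parent_thread_xp,
                                  &child_pid,
                                  &child_thread_ptr,
                                  &error );
}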
trunk/kernel/kern/scheduler.c
r407 r408 128 128 } // end sched_remove() 129 129 130 //////////////////////////////////////// 131 thread_t * sched_select( core_t * core)132 { 133 thread_t * thread;134 135 scheduler_t * sched = &core->scheduler;130 ////////////////////////////////////////////// 131 thread_t * sched_select( scheduler_t * sched ) 132 { 133 thread_t * thread; 134 list_entry_t * current; 135 list_entry_t * last; 136 136 137 137 // take lock protecting sheduler lists 138 138 spinlock_lock( &sched->lock ); 139 140 list_entry_t * current;141 list_entry_t * last;142 139 143 140 // first loop : scan the kernel threads list if not empty … … 172 169 break; 173 170 174 default: // DEV thread if non blocked 175 if( thread->blocked == 0 ) 171 default: // DEV thread if non blocked and waiting queue non empty 172 if( (thread->blocked == 0) && 173 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) 176 174 { 177 175 spinlock_unlock( &sched->lock ); … … 253 251 scheduler_t * sched = &core->scheduler; 254 252 253 // signal_dmsg("\n@@@ %s enter at cycle %d\n", 254 // __FUNCTION__ , hal_time_stamp() ); 255 255 256 // take lock protecting threads lists 256 257 spinlock_lock( &sched->lock ); … … 260 261 { 261 262 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 262 if( thread->signals ) sched_kill_thread( thread ); 263 if( thread->signals ) // sched_kill_thread( thread ); 264 { 265 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 266 __FUNCTION__, thread, thread->signals, hal_time_stamp() ); 267 } 263 268 } 264 269 … … 267 272 { 268 273 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 269 if( thread->signals ) sched_kill_thread( thread ); 274 if( thread->signals ) // sched_kill_thread( thread ); 275 { 276 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 277 __FUNCTION__, thread, thread->signals, hal_time_stamp() ); 278 279 } 270 280 } 271 281 … … 273 283 spinlock_unlock( &sched->lock ); 274 284 285 // signal_dmsg("\n@@@ %s exit at cycle %d\n", 286 // __FUNCTION__ , hal_time_stamp() ); 287 275 288 } // end sched_handle_signals() 276 289 277 ////////////////////////////////////// 278 void sched_update( thread_t * current, 279 thread_t * next ) 280 { 281 scheduler_t * sched = ¤t->core->scheduler; 282 283 if( current->type == THREAD_USER ) sched->u_last = ¤t->sched_list; 284 else sched->k_last = ¤t->sched_list; 285 286 sched->current = next; 287 } 288 289 ////////////////// 290 void sched_yield() 290 //////////////////////////////// 291 void sched_yield( char * cause ) 291 292 { 292 293 thread_t * next; 293 294 thread_t * current = CURRENT_THREAD; 295 scheduler_t * sched = ¤t->core->scheduler; 294 296 295 297 #if( CONFIG_SCHED_DEBUG & 0x1 ) … … 304 306 } 305 307 308 // enter critical section / save SR in current thread context 309 hal_disable_irq( ¤t->save_sr ); 310 306 311 // loop on threads to select next thread 307 next = sched_select( current->core);312 next = sched_select( sched ); 308 313 309 314 // check next thread attached to same core as the calling thread … … 319 324 if( next != current ) 320 325 { 321 // current thread desactivate IRQs 322 hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] ); 323 324 sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n", 325 __FUNCTION__, local_cxy, current->core->lid, 326 327 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 328 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 329 __FUNCTION__, local_cxy, current->core->lid, cause, 326 330 current, 
thread_type_str(current->type), current->process->pid, current->trdid, 327 331 next , thread_type_str(next->type) , next->process->pid , next->trdid, 328 hal_time_stamp() );332 (uint32_t)hal_get_cycles() ); 329 333 330 334 // update scheduler 331 sched_update( current , next ); 335 sched->current = next; 336 if( next->type == THREAD_USER ) sched->u_last = &next->sched_list; 337 else sched->k_last = &next->sched_list; 332 338 333 339 // handle FPU ownership … … 340 346 // switch CPU from calling thread context to new thread context 341 347 hal_do_cpu_switch( current->cpu_context, next->cpu_context ); 342 343 // restore IRQs when next thread resume344 hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );345 348 } 346 349 else 347 350 { 348 351 349 sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n", 350 __FUNCTION__, local_cxy, current->core->lid, current->trdid, 351 thread_type_str(current->type) ,hal_time_stamp() ); 352 353 } 352 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 353 " thread %x (%s) (%x,%x) continue / cycle %d\n", 354 __FUNCTION__, local_cxy, current->core->lid, cause, 355 current, thread_type_str(current->type), current->process->pid, current->trdid, 356 (uint32_t)hal_get_cycles() ); 357 358 } 359 360 // exit critical section / restore SR from next thread context 361 hal_restore_irq( next->save_sr ); 362 354 363 } // end sched_yield() 355 364 … … 384 393 385 394 nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n" 386 "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",395 "kernel_threads = %d / user_threads = %d / current = (%x,%x)\n", 387 396 local_cxy , core->lid, hal_time_stamp(), 388 397 sched->k_threads_nr, sched->u_threads_nr, 389 sched->current-> trdid , sched->idle->trdid );398 sched->current->process->pid , sched->current->trdid ); 390 399 391 400 // display kernel threads … … 393 402 { 394 403 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 395 nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n", 396 thread_type_str( thread->type ), thread->trdid, thread->process->pid, 397 thread->entry_func, thread->blocked ); 404 if (thread->type == THREAD_DEV) 405 { 406 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X / %s\n", 407 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 408 thread, thread->blocked , thread->chdev->name ); 409 } 410 else 411 { 412 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n", 413 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 414 thread, thread->blocked ); 415 } 398 416 } 399 417 … … 402 420 { 403 421 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 404 nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked =%X\n",405 thread_type_str( thread->type ), thread-> trdid, thread->process->pid,406 thread ->entry_func, thread->blocked );422 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n", 423 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 424 thread, thread->blocked ); 407 425 } 408 426 -
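The cause string added to sched_yield() is only used by the debug messages above; the blocking and descheduling protocol itself is unchanged. The typical call site, adapted from rpc_send_sync() in this changeset, is:

// block the current thread, then deschedule with an explicit cause string;
// execution resumes here once another thread unblocks it
thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
sched_yield( "client blocked on RPC" );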
trunk/kernel/kern/scheduler.h
r407 r408 74 74 75 75 /********************************************************************************************* 76 * This function handles pending signals for all registered threads, and calls the 77 * sched_select() function to make a context switch for the core running the calling thread. 76 * This function is the only method to make a context switch. It is called in cas of TICK, 77 * or when when a thread explicitely requires a scheduling. 78 * It handles the pending signals for all threads attached to the core running the calling 79 * thread, and calls the sched_select() function to select a new thread. 80 * The cause argument is only used for debug by the sched_display() function, and 81 * indicates the scheduling cause. 82 ********************************************************************************************* 83 * @ cause : character string defining the scheduling cause. 78 84 ********************************************************************************************/ 79 void sched_yield( );85 void sched_yield( char * cause ); 80 86 81 87 /********************************************************************************************* … … 101 107 /********************************************************************************************* 102 108 * This function does NOT modify the scheduler state. 103 * It just select a thread in the list of attached threads, implementing the following policy: 104 * 1) it scan the list of kernel threads, from the next thread after the last executed one, 105 * and returns the first runnable found (can be the current thread). 106 * 2) if no kernel thread found, it scan the list of user thread, from the next thread after 107 * the last executed one, and returns the first runable found (can be the current thread). 108 * 3) if no runable thread found, it returns the idle thread. 109 * It just select a thread in the list of attached threads, implementing the following 110 * three steps policy: 111 * 1) It scan the list of kernel threads, from the next thread after the last executed one, 112 * and returns the first runnable found : not IDLE, not blocked, client queue not empty. 113 * It can be the current thread. 114 * 2) If no kernel thread found, it scan the list of user thread, from the next thread after 115 * the last executed one, and returns the first runable found : not blocked. 116 * It can be the current thread. 117 * 3) If no runable thread found, it returns the idle thread. 109 118 ********************************************************************************************* 110 * @ core : local pointer on the core descriptor.119 * @ core : local pointer on scheduler. 111 120 * @ returns pointer on selected thread descriptor 112 121 ********************************************************************************************/ 113 struct thread_s * sched_select( struct core_s * core);122 struct thread_s * sched_select( struct scheduler_s * sched ); 114 123 115 124 /********************************************************************************************* -
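The step 1 condition for DEV server threads (not blocked and non-empty client queue) matches the test added to sched_select() in scheduler.c above. Written as a small predicate it would read as follows (the helper name is illustrative, not part of the kernel API):

// a DEV server thread is runnable only if it is not blocked AND at least
// one client thread is registered in its chdev waiting queue
static inline bool_t sched_dev_thread_runnable( thread_t * thread )
{
    return (thread->blocked == 0) &&
           (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root ) ) == 0);
}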
trunk/kernel/kern/thread.c
r407 r408 116 116 // - thread_user_fork() 117 117 // - thread_kernel_create() 118 // - thread_user_init()119 118 ///////////////////////////////////////////////////////////////////////////////////// 120 119 // @ thread : pointer on thread descriptor … … 200 199 thread->signature = THREAD_SIGNATURE; 201 200 201 // FIXME call hal_thread_init() function to initialise the save_sr field 202 thread->save_sr = 0xFF13; 203 202 204 // update local DQDT 203 205 dqdt_local_update_threads( 1 ); … … 322 324 } 323 325 326 // update DQDT for new thread 327 dqdt_local_update_threads( 1 ); 328 324 329 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / trdid = %x / process %x / core = %d\n", 325 330 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, … … 331 336 } // end thread_user_create() 332 337 333 //////////////////////////////////////////////////// 334 error_t thread_user_fork( process_t * process, 335 intptr_t stack_base, 336 uint32_t stack_size, 337 thread_t ** new_thread ) 338 /////////////////////////////////////////////////////// 339 error_t thread_user_fork( xptr_t parent_thread_xp, 340 process_t * child_process, 341 thread_t ** child_thread ) 338 342 { 339 343 error_t error; 340 thread_t * child; // pointer on new thread descriptor 341 lid_t core_lid; // selected core local index 342 343 thread_dmsg("\n[DBG] %s : core[%x,%d] enters\n", 344 __FUNCTION__ , local_cxy , core_lid ); 344 thread_t * child_ptr; // local pointer on local child thread 345 lid_t core_lid; // selected core local index 346 347 thread_t * parent_ptr; // local pointer on remote parent thread 348 cxy_t parent_cxy; // parent thread cluster 349 process_t * parent_process; // local pointer on parent process 350 xptr_t parent_gpt_xp; // extended pointer on parent thread GPT 351 352 void * func; // parent thread entry_func 353 void * args; // parent thread entry_args 354 intptr_t base; // parent thread u_stack_base 355 uint32_t size; // parent thread u_stack_size 356 uint32_t flags; // parent_thread flags 357 vpn_t vpn_base; // parent thread stack vpn_base 358 vpn_t vpn_size; // parent thread stack vpn_size 359 reg_t * uzone; // parent thread pointer on uzone 360 361 vseg_t * vseg; // child thread STACK vseg 362 363 thread_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 364 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 345 365 346 366 // select a target core in local cluster 347 367 core_lid = cluster_select_local_core(); 348 368 349 // get pointer on parent thread descriptor 350 thread_t * parent = CURRENT_THREAD; 351 352 // allocate memory for new thread descriptor 353 child = thread_alloc(); 354 355 if( child == NULL ) 369 // get cluster and local pointer on parent thread descriptor 370 parent_cxy = GET_CXY( parent_thread_xp ); 371 parent_ptr = (thread_t *)GET_PTR( parent_thread_xp ); 372 373 // get relevant fields from parent thread 374 func = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func ) ); 375 args = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args ) ); 376 base = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base ) ); 377 size = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size ) ); 378 flags = hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags ) ); 379 uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone ) ); 380 381 vpn_base = base >> CONFIG_PPM_PAGE_SHIFT; 382 vpn_size = size >> CONFIG_PPM_PAGE_SHIFT; 383 384 // get pointer on parent process in parent thread cluster 385 parent_process = 
(process_t *)hal_remote_lpt( XPTR( parent_cxy, 386 &parent_ptr->process ) ); 387 388 // get extended pointer on parent GPT in parent thread cluster 389 parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt ); 390 391 // allocate memory for child thread descriptor 392 child_ptr = thread_alloc(); 393 if( child_ptr == NULL ) 356 394 { 357 395 printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ ); 358 return ENOMEM;396 return -1; 359 397 } 360 398 361 399 // initialize thread descriptor 362 error = thread_init( child ,363 process,400 error = thread_init( child_ptr, 401 child_process, 364 402 THREAD_USER, 365 parent->entry_func,366 parent->entry_args,403 func, 404 args, 367 405 core_lid, 368 stack_base, 369 stack_size ); 370 406 base, 407 size ); 371 408 if( error ) 372 409 { 373 printk("\n[ERROR] in %s : cannot initialize newthread\n", __FUNCTION__ );374 thread_release( child );410 printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ ); 411 thread_release( child_ptr ); 375 412 return EINVAL; 376 413 } 377 414 378 415 // return child pointer 379 *new_thread = child; 380 381 // set DETACHED flag if required 382 if( parent->flags & THREAD_FLAG_DETACHED ) child->flags = THREAD_FLAG_DETACHED; 416 *child_thread = child_ptr; 417 418 // set detached flag if required 419 if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED; 420 421 // update uzone pointer in child thread descriptor 422 child_ptr->uzone = (char *)((intptr_t)uzone + 423 (intptr_t)child_ptr - 424 (intptr_t)parent_ptr ); 425 383 426 384 427 // allocate CPU context for child thread 385 if( hal_cpu_context_alloc( child ) )428 if( hal_cpu_context_alloc( child_ptr ) ) 386 429 { 387 430 printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ ); 388 thread_release( child );389 return ENOMEM;431 thread_release( child_ptr ); 432 return -1; 390 433 } 391 434 392 435 // allocate FPU context for child thread 393 if( hal_fpu_context_alloc( child ) )436 if( hal_fpu_context_alloc( child_ptr ) ) 394 437 { 395 438 printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ ); 396 thread_release( child ); 397 return ENOMEM; 398 } 399 400 // copy kernel stack content from parent to child thread descriptor 401 void * dst = (void *)(&child->signature) + 4; 402 void * src = (void *)(&parent->signature) + 4; 403 memcpy( dst , src , parent->k_stack_size ); 439 thread_release( child_ptr ); 440 return -1; 441 } 442 443 // create and initialize STACK vseg 444 vseg = vseg_alloc(); 445 vseg_init( vseg, 446 VSEG_TYPE_STACK, 447 base, 448 size, 449 vpn_base, 450 vpn_size, 451 0, 0, XPTR_NULL, // not a file vseg 452 local_cxy ); 453 454 // register STACK vseg in local child VSL 455 vseg_attach( &child_process->vmm , vseg ); 456 457 // copy all valid STACK GPT entries 458 vpn_t vpn; 459 bool_t mapped; 460 ppn_t ppn; 461 for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) 462 { 463 error = hal_gpt_pte_copy( &child_process->vmm.gpt, 464 parent_gpt_xp, 465 vpn, 466 true, // set cow 467 &ppn, 468 &mapped ); 469 if( error ) 470 { 471 vseg_detach( &child_process->vmm , vseg ); 472 vseg_free( vseg ); 473 thread_release( child_ptr ); 474 printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ ); 475 return -1; 476 } 477 478 // increment page descriptor fork_nr for the referenced page if mapped 479 if( mapped ) 480 { 481 xptr_t page_xp = ppm_ppn2page( ppn ); 482 cxy_t page_cxy = GET_CXY( page_xp ); 483 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 484 
hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 485 486 thread_dmsg("\n[DBG] %s : core[%x,%d] copied PTE to child GPT : vpn %x\n", 487 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 488 489 } 490 } 491 492 // set COW flag for STAK vseg in parent thread GPT 493 hal_gpt_flip_cow( true, // set cow 494 parent_gpt_xp, 495 vpn_base, 496 vpn_size ); 497 498 // update DQDT for child thread 499 dqdt_local_update_threads( 1 ); 404 500 405 501 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n", 406 __FUNCTION__, local_cxy , core_lid , child->trdid ,process->pid );502 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, child_ptr->trdid, child_process->pid ); 407 503 408 504 return 0; … … 452 548 hal_cpu_context_create( thread ); 453 549 550 // update DQDT for kernel thread 551 dqdt_local_update_threads( 1 ); 552 454 553 thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n", 455 554 __FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() ); … … 511 610 512 611 // update intrumentation values 513 uint32_t pgfaults = thread->info.pgfault_nr; 514 uint32_t u_errors = thread->info.u_err_nr; 515 uint32_t m_errors = thread->info.m_err_nr; 516 517 process->vmm.pgfault_nr += pgfaults; 518 process->vmm.u_err_nr += u_errors; 519 process->vmm.m_err_nr += m_errors; 612 process->vmm.pgfault_nr += thread->info.pgfault_nr; 520 613 521 614 // release memory allocated for CPU context and FPU context … … 635 728 { 636 729 this->flags &= ~THREAD_FLAG_SCHED; 637 sched_yield( );730 sched_yield( "delayed scheduling" ); 638 731 } 639 732 … … 697 790 698 791 // deschedule 699 sched_yield( );792 sched_yield( "exit" ); 700 793 return 0; 701 794 … … 721 814 while( 1 ) 722 815 { 816 // unmask IRQs 817 hal_enable_irq( NULL ); 818 723 819 if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode 724 820 { … … 740 836 741 837 // force scheduling at each iteration 742 sched_yield( );838 sched_yield( "idle" ); 743 839 } 744 840 } // end thread_idle() … … 754 850 /////////////////////////////////////////////////// 755 851 void thread_kernel_time_update( thread_t * thread ) 756 {757 // TODO758 // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );759 }760 761 ////////////////////////////////////////////////762 void thread_signals_handle( thread_t * thread )763 852 { 764 853 // TODO -
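A hedged sketch of the calling sequence expected on the child side (local variable names are illustrative): thread_user_fork() builds the child thread and its copy-on-write stack mapping, but, as noted in the thread.h comments of this changeset, the CPU context copy and the final activation remain the caller's responsibility.

thread_t * child_thread;
error_t    error;

// create the child thread in the local (child) cluster
error = thread_user_fork( parent_thread_xp,   // extended pointer on parent thread
                          child_process,      // local pointer on child process
                          &child_thread );
if( error ) return -1;

// the caller is then expected to copy the parent CPU context into
// child_thread->cpu_context, and to unblock the child, e.g. :
// thread_unblock( XPTR( local_cxy , child_thread ) , THREAD_BLOCKED_GLOBAL );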
trunk/kernel/kern/thread.h
r407 r408 34 34 #include <spinlock.h> 35 35 #include <core.h> 36 #include <chdev.h> 36 37 #include <cluster.h> 37 38 #include <process.h> … … 96 97 #define THREAD_BLOCKED_RPC 0x0200 /*! thread wait RPC completion */ 97 98 98 #define THREAD_BLOCKED_DEV_QUEUE 0x2000 /*! thread DEV wait queue */99 99 #define THREAD_BLOCKED_DEV_ISR 0x4000 /*! thread DEV wait ISR */ 100 100 … … 132 132 * thread is registered in the local copy of the process descriptor. 133 133 * 134 * WARNING : Don't modify the first 4fields order, as this order is used by the134 * WARNING : Don't modify the first 3 fields order, as this order is used by the 135 135 * hal_kentry assembly code for the TSAR architecture. 136 136 **************************************************************************************/ … … 140 140 typedef struct thread_s 141 141 { 142 void * cpu_context; /*! used for context switch */ 143 void * fpu_context; /*! used for dynamic FPU allocation */ 142 void * cpu_context; /*! pointer on CPU context switch */ 143 void * fpu_context; /*! pointer on FPU context switch */ 144 void * uzone; /*! pointer on uzone for hal_kentry */ 144 145 145 146 intptr_t k_stack_base; /*! kernel stack base address */ … … 172 173 173 174 uint32_t flags; /*! bit vector of flags */ 174 volatile uint32_t blocked; /*! bit vector of blocking causes*/175 volatile uint32_t signals; /*! bit vector of (KILL / SUICIDE) signals*/175 uint32_t signals; /*! bit vector of (KILL / SUICIDE) signals */ 176 uint32_t blocked; /*! bit vector of blocking causes */ 176 177 177 178 error_t errno; /*! errno value set by last system call */ … … 189 190 list_entry_t sched_list; /*! member of threads attached to same core */ 190 191 191 uint32_t dev_channel; /*! device channel for a DEV thread */ 192 chdev_t * chdev; /*! chdev pointer (for a DEV thread only) */ 193 194 reg_t save_sr; /*! used by sched_yield() function */ 192 195 193 196 ioc_command_t ioc_cmd; /*! IOC device generic command */ … … 222 225 223 226 /*************************************************************************************** 224 * This function allocates memory for a user thread descriptor in the local cluster,225 * and initializes it from information contained in the arguments.226 * It is used by the "pthread_create" system call.227 * The CPU context is initialized from scratch , and the "loadable" field is set.228 * The new thread is attached to the core specified in the <attr> argument.227 * This function is used by the pthread_create() system call to create a "new" thread 228 * in an existing process. It allocates memory for an user thread descriptor in the 229 * local cluster, and initializes it from information contained in the arguments. 230 * The CPU context is initialized from scratch. If required by the <attr> argument, 231 * the new thread is attached to the core specified in <attr>. 229 232 * It is registered in the local process descriptor specified by the <pid> argument. 230 233 * The thread descriptor pointer is returned to allow the parent thread to register it … … 246 249 247 250 /*************************************************************************************** 248 * This function is used by the fork() system call to create the child process main249 * thread. It allocates memory for an user thread descriptor in the local cluster,250 * and initializes it from information contained in the calling thread descriptor.251 * This function is used by the sys_fork() system call to create the "child" thread 252 * in the local cluster. 
It allocates memory for a thread descriptor, and initializes 253 * it from the "parent" thread descriptor defined by the <parent_thread_xp> argument. 251 254 * The new thread is attached to the core that has the lowest load in local cluster. 252 * It is registered in the child process descriptor defined by the <process> argument.255 * It is registered in the "child" process defined by the <child_process> argument. 253 256 * This new thread inherits its user stack from the parent thread, as it uses the 254 257 * Copy-On-Write mechanism to get a private stack when required. … … 256 259 * the Copy-On-Write mechanism cannot be used for kernel segments (because kernel 257 260 * uses physical addressing on some architectures). 258 * The CPU and FPU execution contexts are created and linked to the new thread, 259 * but the actual context copy is NOT done. The THREAD_BLOCKED_GLOBAL bit is set, 260 * and the thread must be explicitely unblocked later to make the new thread runable. 261 *************************************************************************************** 262 * @ process : local pointer on owner process descriptor. 263 * @ stack_base : user stack base address (from parent). 264 * @ stack_size : user stack size (from parent). 265 * @ new_thread : [out] address of buffer for new thread descriptor pointer. 266 * @ returns 0 if success / returns ENOMEM if error. 267 **************************************************************************************/ 268 error_t thread_user_fork( process_t * process, 269 intptr_t stack_base, 270 uint32_t stack_size, 271 thread_t ** new_thread ); 261 * The CPU and FPU execution contexts are created and linked to the new thread. 262 * but the actual context copy is NOT done, and must be done by by the sys_fork(). 263 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start. 264 *************************************************************************************** 265 * @ parent_thread_xp : extended pointer on parent thread descriptor. 266 * @ child_process : local pointer on child process descriptor. 267 * @ child_thread : [out] address of buffer for child thread descriptor pointer. 268 * @ returns 0 if success / returns -1 if error. 269 **************************************************************************************/ 270 error_t thread_user_fork( xptr_t parent_thread_xp, 271 process_t * child_process, 272 thread_t ** child_thread ); 272 273 273 274 /*************************************************************************************** 274 275 * This function allocates memory for a kernel thread descriptor in the local cluster, 275 * and initializes it from arguments values, calling the thread_kernel_init() function, 276 * that also allocates and initializes the CPU context. 276 * and initializes it from arguments values. 277 * It is called by kernel_init() to statically create all DEV server threads 278 * It is also called to dynamically create RPC threads when required. 277 279 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 278 280 *************************************************************************************** … … 291 293 292 294 /*************************************************************************************** 293 * This function initializes an existing kernelthread descriptor from arguments values.295 * This function initializes an existing thread descriptor from arguments values. 
294 296 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 297 * It is called by the kernel_init() function to initialize the IDLE thread. 295 298 *************************************************************************************** 296 299 * @ thread : pointer on existing thread descriptor.
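thread_kernel_create() keeps the calling convention used by the (now removed) rpc_activate_thread() code in rpc.c. A sketch of dynamic creation of an RPC server thread on a given local core follows; the activation step shown at the end is an assumption based on the THREAD_BLOCKED_GLOBAL description above, not code from this changeset:

thread_t * thread;
error_t    error;

// create a blocked RPC server thread on core core_lid in the local cluster
error = thread_kernel_create( &thread,
                              THREAD_RPC,         // thread type
                              &rpc_thread_func,   // entry function
                              NULL,               // no entry argument
                              core_lid );         // target core local index
if( error == 0 )
{
    // activate the new thread (assumed : clear the GLOBAL blocking bit)
    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
}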