Changeset 408 for trunk/kernel
- Timestamp:
- Dec 5, 2017, 4:20:07 PM (7 years ago)
- Location:
- trunk/kernel
- Files:
-
- 48 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/devices/dev_dma.c
r407 r408 47 47 48 48 // set dma name 49 snprintf( dma->name , 16 , "dma _%d_%x" , channel , local_cxy );49 snprintf( dma->name , 16 , "dma%d_%x" , channel , local_cxy ); 50 50 51 51 // call driver init function … … 72 72 } 73 73 74 // initialises server field in DMAchdev descriptor74 // initialises server field in chdev descriptor 75 75 dma->server = new_thread; 76 77 // initializes chdev field in thread descriptor 78 new_thread->chdev = dma; 76 79 77 // start server thread 78 thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE ); 80 // unblock server thread 79 81 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 80 82 -
trunk/kernel/devices/dev_fbf.c
r407 r408 40 40 { 41 41 // set FBF chdev extension fields 42 // TODO this should be done in the imp ementation42 // TODO this should be done in the implementation 43 43 // TODO specific part, as these parameters must be obtained from the hardware. 44 44 chdev->ext.fbf.width = CONFIG_FBF_WIDTH; -
trunk/kernel/devices/dev_ioc.c
r407 r408 77 77 assert( (error == 0) , __FUNCTION__ , "cannot create server thread" ); 78 78 79 // set "server" field in iocdescriptor79 // set "server" field in chdev descriptor 80 80 ioc->server = new_thread; 81 81 82 // start server thread 83 thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE ); 82 // set "chdev field in thread descriptor 83 new_thread->chdev = ioc; 84 85 // unblock server thread 84 86 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 85 87 -
trunk/kernel/devices/dev_nic.c
r407 r408 79 79 nic->server = new_thread; 80 80 81 // start server thread 82 thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE ); 81 // set "chdev" field in thread descriptor 82 new_thread->chdev = nic; 83 84 // unblock server thread 83 85 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 84 86 … … 101 103 102 104 // get pointer on NIC-RX chdev descriptor 103 uint32_t channel = thread_ptr-> dev_channel;105 uint32_t channel = thread_ptr->chdev->channel; 104 106 xptr_t dev_xp = chdev_dir.nic_rx[channel]; 105 107 cxy_t dev_cxy = GET_CXY( dev_xp ); … … 129 131 // block on THREAD_BLOCKED_IO condition and deschedule 130 132 thread_block( thread_ptr , THREAD_BLOCKED_IO ); 131 sched_yield( );133 sched_yield("client blocked on I/O"); 132 134 133 135 // disable NIC-RX IRQ … … 171 173 172 174 // get pointer on NIC-TX chdev descriptor 173 uint32_t channel = thread_ptr-> dev_channel;175 uint32_t channel = thread_ptr->chdev->channel; 174 176 xptr_t dev_xp = chdev_dir.nic_tx[channel]; 175 177 cxy_t dev_cxy = GET_CXY( dev_xp ); … … 199 201 // block on THREAD_BLOCKED I/O condition and deschedule 200 202 thread_block( thread_ptr , THREAD_BLOCKED_IO ); 201 sched_yield( );203 sched_yield("client blocked on I/O"); 202 204 203 205 // disable NIC-TX IRQ -
trunk/kernel/devices/dev_txt.c
r407 r408 93 93 txt->server = new_thread; 94 94 95 // start server thread 96 thread_block( new_thread , THREAD_BLOCKED_DEV_QUEUE ); 95 // set "chdev" field in thread descriptor 96 new_thread->chdev = txt; 97 98 // unblock server thread 97 99 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 98 100 } -
trunk/kernel/fs/vfs.c
r407 r408 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016 )5 * Alain Greiner (2016,2017) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/kern/chdev.c
r407 r408 120 120 uint32_t lid; // core running the server thread local index 121 121 xptr_t lock_xp; // extended pointer on lock protecting the chdev queue 122 uint32_t modified; // non zero if the server thread state was modified122 uint32_t different; // non zero if server thread core != client thread core 123 123 uint32_t save_sr; // for critical section 124 124 … … 152 152 lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) ); 153 153 154 // enter critical section 154 // compute server core != thread core 155 different = (lid != this->core->lid) || (local_cxy != chdev_cxy); 156 157 // enter critical section to make atomic : 158 // (1) client blocking 159 // (2) client registration in server queue 160 // (3) IPI to force server scheduling 161 // (4) descheduling 162 // ... in this order 155 163 hal_disable_irq( &save_sr ); 164 165 // block current thread 166 thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO ); 156 167 157 168 // register client thread in waiting queue … … 160 171 remote_spinlock_unlock( lock_xp ); 161 172 162 // unblock server thread 163 modified = thread_unblock( XPTR( chdev_cxy , server_ptr ), THREAD_BLOCKED_DEV_QUEUE ); 164 165 // send IPI to core running the server thread 166 if( modified ) dev_pic_send_ipi( chdev_cxy , lid ); 173 // send IPI to core running the server thread if required 174 if( different ) dev_pic_send_ipi( chdev_cxy , lid ); 167 175 168 // block client thread169 assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );170 171 176 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n", 172 177 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() ); 173 178 174 thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO ); 175 sched_yield(); 179 // deschedule 180 assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" ); 181 sched_yield("blocked on I/O"); 176 182 177 183 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle 
%d\n", … … 217 223 __FUNCTION__ , server , hal_time_stamp() ); 218 224 219 // block and deschedule 220 thread_block( server , THREAD_BLOCKED_DEV_QUEUE ); 221 sched_yield(); 225 // deschedule 226 sched_yield("I/O queue empty"); 222 227 223 228 chdev_dmsg("\n[DBG] %s : thread %x resume /cycle %d\n", -
trunk/kernel/kern/cluster.c
r407 r408 45 45 #include <dqdt.h> 46 46 47 ///////////////////////////////////////////////////////////////////////////////////// //////47 ///////////////////////////////////////////////////////////////////////////////////// 48 48 // Extern global variables 49 ///////////////////////////////////////////////////////////////////////////////////// //////49 ///////////////////////////////////////////////////////////////////////////////////// 50 50 51 51 extern process_t process_zero; // allocated in kernel_init.c file 52 52 53 54 55 //////////////////////////////////56 void cluster_sysfs_register(void)57 {58 // TODO59 }60 53 61 54 ///////////////////////////////////////////////// -
trunk/kernel/kern/cluster.h
r407 r408 196 196 * This function allocates a new PID in local cluster, that becomes the process owner. 197 197 * It registers the process descriptor extended pointer in the local processs manager 198 * pref_tbl[] array. This function is called by the rpc_process_alloc_pid() function for199 * remote registration, or by the process_init_create() function for local registration.198 * pref_tbl[] array. This function is called by the process_make_fork() function, 199 * or by the process_init_create() function. 200 200 ****************************************************************************************** 201 201 * @ process : [in] extended pointer on the process descriptor. -
trunk/kernel/kern/core.c
r407 r408 111 111 112 112 // handle scheduler 113 if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( );113 if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK"); 114 114 115 115 // update DQDT -
trunk/kernel/kern/do_syscall.c
r407 r408 29 29 #include <printk.h> 30 30 #include <syscalls.h> 31 #include <shared_syscalls.h> 31 32 32 /////////////////////////////////////////////////////////////////////////////////////// //////33 /////////////////////////////////////////////////////////////////////////////////////// 33 34 // This ƒonction should never be called... 34 /////////////////////////////////////////////////////////////////////////////////////// //////35 /////////////////////////////////////////////////////////////////////////////////////// 35 36 static int sys_undefined() 36 37 { … … 39 40 } 40 41 41 /////////////////////////////////////////////////////////////////////////////////////// //////42 /////////////////////////////////////////////////////////////////////////////////////// 42 43 // This array of pointers define the kernel functions implementing the syscalls. 43 44 // It must be kept consistent with the enum in "shared_syscalls.h" file. 44 /////////////////////////////////////////////////////////////////////////////////////// //////45 /////////////////////////////////////////////////////////////////////////////////////// 45 46 46 47 typedef int (*sys_func_t) (); … … 59 60 sys_mutex, // 9 60 61 61 sys_ undefined,// 1062 sys_exit, // 10 62 63 sys_munmap, // 11 63 64 sys_open, // 12 … … 101 102 }; 102 103 104 //////////////////////////////////// 105 char * syscall_str( uint32_t index ) 106 { 107 if ( index == SYS_THREAD_EXIT ) return "THREAD_EXIT"; // 0 108 else if( index == SYS_THREAD_YIELD ) return "THREAD_YIELD"; // 1 109 else if( index == SYS_THREAD_CREATE ) return "THREAD_CREATE"; // 2 110 else if( index == SYS_THREAD_JOIN ) return "THREAD_JOIN"; // 3 111 else if( index == SYS_THREAD_DETACH ) return "THREAD_DETACH"; // 4 112 else if( index == SYS_SEM ) return "SEM"; // 6 113 else if( index == SYS_CONDVAR ) return "CONDVAR"; // 7 114 else if( index == SYS_BARRIER ) return "BARRIER"; // 8 115 else if( index == SYS_MUTEX ) return "MUTEX"; // 9 116 117 else if( index == 
SYS_EXIT ) return "EXIT"; // 10 118 else if( index == SYS_MUNMAP ) return "MUNMAP"; // 11 119 else if( index == SYS_OPEN ) return "OPEN"; // 12 120 else if( index == SYS_MMAP ) return "MMAP"; // 13 121 else if( index == SYS_READ ) return "READ"; // 14 122 else if( index == SYS_WRITE ) return "WRITE"; // 15 123 else if( index == SYS_LSEEK ) return "LSEEK"; // 16 124 else if( index == SYS_CLOSE ) return "CLOSE"; // 17 125 else if( index == SYS_UNLINK ) return "UNLINK"; // 18 126 else if( index == SYS_PIPE ) return "PIPE"; // 19 127 128 else if( index == SYS_CHDIR ) return "CHDIR"; // 20 129 else if( index == SYS_MKDIR ) return "MKDIR"; // 21 130 else if( index == SYS_MKFIFO ) return "MKFIFO"; // 22 131 else if( index == SYS_OPENDIR ) return "OPENDIR"; // 23 132 else if( index == SYS_READDIR ) return "READDIR"; // 24 133 else if( index == SYS_CLOSEDIR ) return "CLOSEDIR"; // 25 134 else if( index == SYS_GETCWD ) return "GETCWD"; // 26 135 else if( index == SYS_ALARM ) return "ALARM"; // 28 136 else if( index == SYS_RMDIR ) return "RMDIR"; // 29 137 138 else if( index == SYS_UTLS ) return "UTLS"; // 30 139 else if( index == SYS_CHMOD ) return "CHMOD"; // 31 140 else if( index == SYS_SIGNAL ) return "SIGNAL"; // 32 141 else if( index == SYS_TIMEOFDAY ) return "TIMEOFDAY"; // 33 142 else if( index == SYS_KILL ) return "KILL"; // 34 143 else if( index == SYS_GETPID ) return "GETPID"; // 35 144 else if( index == SYS_FORK ) return "FORK"; // 36 145 else if( index == SYS_EXEC ) return "EXEC"; // 37 146 else if( index == SYS_STAT ) return "STAT"; // 38 147 else if( index == SYS_TRACE ) return "TRACE"; // 39 148 149 else if( index == SYS_GET_CONFIG ) return "GET_CONFIG"; // 40 150 else if( index == SYS_GET_CORE ) return "GET_CORE"; // 41 151 else if( index == SYS_GET_CYCLE ) return "GET_CYCLE"; // 42 152 else if( index == SYS_GET_SCHED ) return "GET_SCHED"; // 43 153 else if( index == SYS_PANIC ) return "PANIC"; // 44 154 else if( index == SYS_SLEEP ) return "SLEEP"; // 45 155 
else if( index == SYS_WAKEUP ) return "WAKEUP"; // 46 156 157 else return "undefined"; 158 } 159 160 103 161 ////////////////////////////////// 104 162 reg_t do_syscall( thread_t * this, … … 109 167 reg_t service_num ) 110 168 { 111 int 169 int error = 0; 112 170 113 171 // update user time 114 172 thread_user_time_update( this ); 115 173 116 // enable interrupts117 hal_enable_irq( NULL );118 119 174 // check syscall index 120 175 if( service_num >= SYSCALLS_NR ) … … 128 183 } 129 184 130 #if( CONFIG_SYSCALL_DEBUG & 0x1)131 printk("\n[DBG] %s : pid = %x / trdid = %x / service #%d\n"132 " arg0 = %x / arg1 = %x / arg2 = %x / arg3 = %x\n",133 __FUNCTION__ , this->process->pid , this->trdid , service_num , arg0 , arg1 , arg2 , arg3 );134 #endif135 136 185 // reset errno 137 186 this->errno = 0; … … 140 189 error = syscall_tbl[service_num] ( arg0 , arg1 , arg2 , arg3 ); 141 190 142 // disable interrupt143 hal_disable_irq( NULL );144 145 191 // update kernel time 146 192 thread_kernel_time_update( this ); -
trunk/kernel/kern/do_syscall.h
r407 r408 30 30 #include <thread.h> 31 31 32 /************************************************************************************** ******32 /************************************************************************************** 33 33 * This function calls the kernel function defined by the <service_num> argument. 34 * The possible values for servic_num are defined in the syscalls/syscalls.h file. 35 ******************************************************************************************** 34 * The possible values for service_num are defined in the syscalls/syscalls.h file. 35 * It does NOT enable interrupts, that must be enabled by the kernel function 36 * depending on the implemented service. 37 ************************************************************************************** 36 38 * @ this : pointer on calling thread descriptor 37 39 * @ arg0 : kernel function argument 0 … … 41 43 * @ service_num : kernel service index 42 44 * @ return 0 if success / return non zero if failure. 43 ************************************************************************************* ******/45 *************************************************************************************/ 44 46 reg_t do_syscall( thread_t * this, 45 47 reg_t arg0, -
trunk/kernel/kern/kernel_init.c
r407 r408 122 122 vfs_ctx_t fs_context[FS_TYPES_NR] CONFIG_CACHE_LINE_ALIGNED; 123 123 124 // These variables are used by the sched_yield function to save SR value 125 __attribute__((section(".kdata"))) 126 uint32_t switch_save_sr[CONFIG_MAX_LOCAL_CORES] CONFIG_CACHE_LINE_ALIGNED; 127 128 #if CONFIG_READ_DEBUG 124 125 // TODO remove these debug variables used dans sys_read() 126 127 #if CONFIG_READ_DEBUG 129 128 uint32_t enter_sys_read; 130 129 uint32_t exit_sys_read; … … 342 341 /////////////////////////////////////////////////////////////////////////////////////////// 343 342 // This function allocates memory and initializes the chdev descriptors for the 344 // external (shared) peripherals other than the IOPIC, as specified by the boot_info ,345 // includingthe dynamic linking with the driver for the specified implementation.343 // external (shared) peripherals other than the IOPIC, as specified by the boot_info. 344 // This includes the dynamic linking with the driver for the specified implementation. 346 345 // These chdev descriptors are distributed on all clusters, using a modulo on a global 347 // index, identically computed in all clusters: In each cluster, the local CP0 core 348 // computes the global index for all external chdevs, and creates only the chdevs that 349 // must be placed in the local cluster. 346 // index, identically computed in all clusters. 347 // This function is executed in all clusters by the CP0 core, that computes a global index 348 // for all external chdevs. Each CP0 core creates only the chdevs that must be placed in 349 // the local cluster, because the global index matches the local index. 350 350 // The relevant entries in all copies of the devices directory are initialised. 
351 351 /////////////////////////////////////////////////////////////////////////////////////////// … … 830 830 831 831 // all CP0s initialize the process_zero descriptor 832 if( core_lid == 0 ) process_ reference_init( &process_zero , 0 , XPTR_NULL);832 if( core_lid == 0 ) process_zero_init( &process_zero ); 833 833 834 834 // CP0 in cluster 0 initializes the PIC chdev, -
trunk/kernel/kern/printk.c
r407 r408 453 453 } 454 454 455 ////////////////////////// 456 void puts( char * string ) 457 { 458 uint32_t save_sr; 459 uint32_t n = 0; 460 461 // compute string length 462 while ( string[n] > 0 ) n++; 463 464 // get pointers on TXT0 chdev 465 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 466 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 467 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 468 469 // get extended pointer on remote TXT0 chdev lock 470 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 471 472 // get TXT0 lock in busy waiting mode 473 remote_spinlock_lock_busy( lock_xp , &save_sr ); 474 475 // display string on TTY0 476 dev_txt_sync_write( string , n ); 477 478 // release TXT0 lock in busy waiting mode 479 remote_spinlock_unlock_busy( lock_xp , save_sr ); 480 } 481 482 483 ///////////////////////// 484 void putx( uint32_t val ) 485 { 486 static const char HexaTab[] = "0123456789ABCDEF"; 487 488 char buf[10]; 489 uint32_t c; 490 uint32_t save_sr; 491 492 buf[0] = '0'; 493 buf[1] = 'x'; 494 495 // build buffer 496 for (c = 0; c < 8; c++) 497 { 498 buf[9 - c] = HexaTab[val & 0xF]; 499 val = val >> 4; 500 } 501 502 // get pointers on TXT0 chdev 503 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 504 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 505 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 506 507 // get extended pointer on remote TXT0 chdev lock 508 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 509 510 // get TXT0 lock in busy waiting mode 511 remote_spinlock_lock_busy( lock_xp , &save_sr ); 512 513 // display string on TTY0 514 dev_txt_sync_write( buf , 10 ); 515 516 // release TXT0 lock in busy waiting mode 517 remote_spinlock_unlock_busy( lock_xp , save_sr ); 518 } 519 520 ///////////////////////// 521 void putl( uint64_t val ) 522 { 523 static const char HexaTab[] = "0123456789ABCDEF"; 524 525 char buf[18]; 526 uint32_t c; 527 uint32_t save_sr; 528 529 buf[0] = '0'; 530 buf[1] = 'x'; 531 532 // build buffer 533 for (c = 0; c < 16; c++) 534 { 535 buf[17 - c] = 
HexaTab[(unsigned int)val & 0xF]; 536 val = val >> 4; 537 } 538 539 // get pointers on TXT0 chdev 540 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 541 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 542 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 543 544 // get extended pointer on remote TXT0 chdev lock 545 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 546 547 // get TXT0 lock in busy waiting mode 548 remote_spinlock_lock_busy( lock_xp , &save_sr ); 549 550 // display string on TTY0 551 dev_txt_sync_write( buf , 18 ); 552 553 // release TXT0 lock in busy waiting mode 554 remote_spinlock_unlock_busy( lock_xp , save_sr ); 555 } 556 455 557 456 558 // Local Variables: -
trunk/kernel/kern/printk.h
r407 r408 90 90 91 91 /********************************************************************************** 92 * This function displays a "PANIC" message and forces the calling core in93 * sleeping mode if a Boolean condition is false.94 * Th ese functions areactually used to debug the kernel...92 * This function displays a formated message on kernel TXT0 terminal, 93 * and forces the calling core in sleeping mode if a Boolean condition is false. 94 * This function is actually used to debug the kernel... 95 95 ********************************************************************************** 96 96 * @ condition : condition that must be true. … … 101 101 const char * function_name, 102 102 char * format , ... ); 103 104 /********************************************************************************** 105 * This function displays a non-formated message on kernel TXT0 terminal. 106 * This function is actually used to debug the assembly level kernel functions. 107 ********************************************************************************** 108 * @ string : non-formatted string. 109 *********************************************************************************/ 110 void puts( char * string ); 111 112 /********************************************************************************** 113 * This function displays a 32 bits value in hexadecimal on kernel TXT0 terminal. 114 * This function is actually used to debug the assembly level kernel functions. 115 ********************************************************************************** 116 * @ val : 32 bits unsigned value. 117 *********************************************************************************/ 118 void putx( uint32_t val ); 119 120 /********************************************************************************** 121 * This function displays a 64 bits value in hexadecimal on kernel TXT0 terminal. 122 * This function is actually used to debug the assembly level kernel functions. 
123 ********************************************************************************** 124 * @ val : 64 bits unsigned value. 125 *********************************************************************************/ 126 void putl( uint64_t val ); 127 103 128 104 129 #define panic(fmt, ...) _panic("\n[PANIC] %s(): " fmt "\n", __func__, ##__VA_ARGS__) -
trunk/kernel/kern/process.c
r407 r408 82 82 } 83 83 84 ///////////////////////////////////////////// 85 void process_zero_init( process_t * process ) 86 { 87 // initialize PID, PPID anf PREF 88 process->pid = 0; 89 process->ppid = 0; 90 process->ref_xp = XPTR( local_cxy , process ); 91 92 // reset th_tbl[] array as empty 93 uint32_t i; 94 for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ ) 95 { 96 process->th_tbl[i] = NULL; 97 } 98 process->th_nr = 0; 99 spinlock_init( &process->th_lock ); 100 101 hal_fence(); 102 103 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 104 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 105 106 } // end process_zero_init() 107 84 108 ///////////////////////////////////////////////// 85 109 void process_reference_init( process_t * process, 86 110 pid_t pid, 87 xptr_t parent_xp ) 88 { 89 cxy_t parent_cxy; 90 process_t * parent_ptr; 91 pid_t parent_pid; 92 111 pid_t ppid, 112 xptr_t model_xp ) 113 { 114 cxy_t model_cxy; 115 process_t * model_ptr; 93 116 error_t error1; 94 117 error_t error2; … … 104 127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 105 128 106 // get parent process cluster, local pointer, and pid 107 // for all processes other than kernel process 108 if( process == &process_zero ) // kernel process 109 { 110 assert( (pid == 0) , __FUNCTION__ , "process_zero must have PID = 0\n"); 111 112 parent_cxy = 0; 113 parent_ptr = NULL; 114 parent_pid = 0; 115 } 116 else // user process 117 { 118 parent_cxy = GET_CXY( parent_xp ); 119 parent_ptr = (process_t *)GET_PTR( parent_xp ); 120 parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) ); 121 } 129 // get model process cluster and local pointer 130 model_cxy = GET_CXY( model_xp ); 131 model_ptr = (process_t *)GET_PTR( model_xp ); 122 132 123 133 // initialize PID, PPID, and REF 124 134 process->pid = pid; 125 process->ppid = p arent_pid;135 process->ppid = ppid; 126 136 process->ref_xp = XPTR( local_cxy , process ); 127 137 128 // 
initialize vmm, fd array and others structures for user processes. 129 // These structures are not used by the kernel process. 130 if( pid ) 131 { 132 // initialize vmm (not for kernel) 133 vmm_init( process ); 134 135 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm initialised for process %x\n", 138 // initialize vmm 139 vmm_init( process ); 140 141 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm empty for process %x\n", 136 142 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 137 143 138 // initialize fd_array (not for kernel) 139 process_fd_init( process ); 140 141 // create stdin / stdout / stderr pseudo-files (not for kernel) 142 if( parent_pid == 0 ) // process_init 143 { 144 error1 = vfs_open( process, 145 CONFIG_INIT_STDIN, 146 O_RDONLY, 147 0, // FIXME chmod 148 &stdin_xp, 149 &stdin_id ); 150 151 error2 = vfs_open( process, 152 CONFIG_INIT_STDOUT, 153 O_WRONLY, 154 0, // FIXME chmod 155 &stdout_xp, 156 &stdout_id ); 157 158 error3 = vfs_open( process, 159 CONFIG_INIT_STDERR, 160 O_WRONLY, 161 0, // FIXME chmod 162 &stderr_xp, 163 &stderr_id ); 164 } 165 else // user process 166 { 167 error1 = vfs_open( process, 168 CONFIG_USER_STDIN, 169 O_RDONLY, 170 0, // FIXME chmod 171 &stdin_xp, 172 &stdin_id ); 173 174 error2 = vfs_open( process, 175 CONFIG_USER_STDOUT, 176 O_WRONLY, 177 0, // FIXME chmod 178 &stdout_xp, 179 &stdout_id ); 180 181 error3 = vfs_open( process, 182 CONFIG_USER_STDERR, 183 O_WRONLY, 184 0, // FIXME chmod 185 &stderr_xp, 186 &stderr_id ); 187 } 188 189 assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ , 190 "cannot open stdin/stdout/stderr pseudo files\n"); 191 192 assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ , 193 "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id ); 144 // initialize fd_array (not for kernel) 145 process_fd_init( process ); 146 147 // create stdin / stdout / stderr pseudo-files 148 if( ppid == 0 ) // process_init 149 { 
150 error1 = vfs_open( process, 151 CONFIG_INIT_STDIN, 152 O_RDONLY, 153 0, // FIXME chmod 154 &stdin_xp, 155 &stdin_id ); 156 157 error2 = vfs_open( process, 158 CONFIG_INIT_STDOUT, 159 O_WRONLY, 160 0, // FIXME chmod 161 &stdout_xp, 162 &stdout_id ); 163 164 error3 = vfs_open( process, 165 CONFIG_INIT_STDERR, 166 O_WRONLY, 167 0, // FIXME chmod 168 &stderr_xp, 169 &stderr_id ); 170 } 171 else // other user process 172 { 173 error1 = vfs_open( process, 174 CONFIG_USER_STDIN, 175 O_RDONLY, 176 0, // FIXME chmod 177 &stdin_xp, 178 &stdin_id ); 179 180 error2 = vfs_open( process, 181 CONFIG_USER_STDOUT, 182 O_WRONLY, 183 0, // FIXME chmod 184 &stdout_xp, 185 &stdout_id ); 186 187 error3 = vfs_open( process, 188 CONFIG_USER_STDERR, 189 O_WRONLY, 190 0, // FIXME chmod 191 &stderr_xp, 192 &stderr_id ); 193 } 194 195 assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ , 196 "cannot open stdin/stdout/stderr pseudo files\n"); 197 198 assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ , 199 "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id ); 200 201 // initialize specific files, cwd_lock, and fd_array 202 process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 203 &model_ptr->vfs_root_xp ) ); 204 process->vfs_cwd_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 205 &model_ptr->vfs_cwd_xp ) ); 206 process->vfs_bin_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy, 207 &model_ptr->vfs_bin_xp ) ); 208 vfs_file_count_up( process->vfs_root_xp ); 209 vfs_file_count_up( process->vfs_cwd_xp ); 210 vfs_file_count_up( process->vfs_bin_xp ); 211 212 process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ), 213 XPTR( model_cxy , &model_ptr->fd_array ) ); 214 215 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 194 216 195 217 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n", 196 218 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 197 
219 198 199 // reset reference process files structures and cwd_lock (not for kernel) 200 process->vfs_root_xp = XPTR_NULL; 201 process->vfs_bin_xp = XPTR_NULL; 202 process->vfs_cwd_xp = XPTR_NULL; 203 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 204 205 // reset children list root (not for kernel) 206 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 207 process->children_nr = 0; 208 209 // reset semaphore / mutex / barrier / condvar list roots (nor for kernel) 210 xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); 211 xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); 212 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 213 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 214 remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) ); 215 216 // register new process in the parent children list (nor for kernel) 217 xptr_t entry = XPTR( local_cxy , &process->brothers_list ); 218 xptr_t root = XPTR( parent_cxy , &parent_ptr->children_root ); 219 xlist_add_first( root , entry ); 220 } 221 222 // reset th_tbl[] array as empty 220 // reset children list root 221 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 222 process->children_nr = 0; 223 224 // reset semaphore / mutex / barrier / condvar list roots 225 xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); 226 xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); 227 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 228 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 229 remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) ); 230 231 // register new process in the local cluster manager pref_tbl[] 232 lpid_t lpid = LPID_FROM_PID( pid ); 233 LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process ); 234 235 // register new process descriptor in local cluster manager local_list 236 cluster_process_local_link( process ); 237 238 // register new process descriptor in 
local cluster manager copies_list 239 cluster_process_copies_link( process ); 240 241 // reset th_tbl[] array as empty in process descriptor 223 242 uint32_t i; 224 243 for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ ) … … 228 247 process->th_nr = 0; 229 248 spinlock_init( &process->th_lock ); 230 231 // register new process descriptor in local cluster manager local_list232 cluster_process_local_link( process );233 234 // register new process descriptor in owner cluster manager copies_list235 cluster_process_copies_link( process );236 237 // initialize signal manager TODO [AG]238 249 239 250 hal_fence(); … … 370 381 uint32_t count; // thread counter 371 382 372 printk("\n @@@%s enter\n", __FUNCTION__ );383 printk("\n[@@@] %s enter\n", __FUNCTION__ ); 373 384 374 385 // get lock protecting th_tbl[] … … 390 401 } 391 402 392 printk("\n @@@%s : %d signal(s) sent\n", __FUNCTION__, count );403 printk("\n[@@@] %s : %d signal(s) sent\n", __FUNCTION__, count ); 393 404 394 405 // second loop on threads to wait acknowledge from scheduler, … … 403 414 { 404 415 405 printk("\n @@@%s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() );416 printk("\n[@@@] %s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 406 417 407 418 // poll the THREAD_SIG_KILL bit until reset 408 419 while( thread->signals & THREAD_SIG_KILL ) asm volatile( "nop" ); 409 420 410 printk("\n @@@%s exit polling\n", __FUNCTION__ );421 printk("\n[@@@] %s exit polling\n", __FUNCTION__ ); 411 422 412 423 // detach target thread from parent if attached … … 424 435 } 425 436 426 printk("\n @@@%s : %d ack(s) received\n", __FUNCTION__, count );437 printk("\n[@@@] %s : %d ack(s) received\n", __FUNCTION__, count ); 427 438 428 439 // release lock protecting th_tbl[] … … 432 443 process_destroy( process ); 433 444 434 printk("\n[ @@@] %s : core[%x,%d] exit\n",445 printk("\n[DBG] %s : core[%x,%d] exit\n", 435 446 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid ); 436 447 … … 688 699 } 
// process_remove_thread() 689 700 701 ///////////////////////////////////////////////////////// 702 error_t process_make_fork( xptr_t parent_process_xp, 703 xptr_t parent_thread_xp, 704 pid_t * child_pid, 705 thread_t ** child_thread ) 706 { 707 process_t * process; // local pointer on child process descriptor 708 thread_t * thread; // local pointer on child thread descriptor 709 pid_t new_pid; // process identifier for child process 710 pid_t parent_pid; // process identifier for parent process 711 xptr_t ref_xp; // extended pointer on reference process 712 error_t error; 713 714 // get cluster and local pointer for parent process 715 cxy_t parent_process_cxy = GET_CXY( parent_process_xp ); 716 process_t * parent_process_ptr = (process_t *)GET_PTR( parent_process_xp ); 717 718 // get parent process PID 719 parent_pid = hal_remote_lw( XPTR( parent_process_cxy , &parent_process_ptr->pid ) ); 720 721 // check parent process is the reference 722 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 723 assert( (parent_process_xp == ref_xp ) , __FUNCTION__ , 724 "parent process must be the reference process\n" ); 725 726 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 727 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , hal_get_cycles() ); 728 729 // allocate a process descriptor 730 process = process_alloc(); 731 if( process == NULL ) 732 { 733 printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 734 __FUNCTION__, local_cxy ); 735 return -1; 736 } 737 738 process_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n", 739 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 740 741 // allocate a child PID from local cluster 742 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &new_pid ); 743 if( (error != 0) || (new_pid == 0) ) 744 { 745 printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 746 __FUNCTION__, local_cxy ); 747 process_free( process ); 
748 return -1; 749 } 750 751 process_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n", 752 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , hal_get_cycles() ); 753 754 // initializes child process descriptor from parent process descriptor 755 process_reference_init( process, 756 new_pid, 757 parent_pid, 758 parent_process_xp ); 759 760 process_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n", 761 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 762 763 // copy VMM from parent descriptor to child descriptor 764 error = vmm_fork_copy( process, 765 parent_process_xp ); 766 if( error ) 767 { 768 printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 769 __FUNCTION__, local_cxy ); 770 process_free( process ); 771 cluster_pid_release( new_pid ); 772 return -1; 773 } 774 775 process_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n", 776 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 777 778 // create child thread descriptor from parent thread descriptor 779 error = thread_user_fork( parent_thread_xp, 780 process, 781 &thread ); 782 if( error ) 783 { 784 printk("\n[ERROR] in %s : cannot create thread in cluster %x\n", 785 __FUNCTION__, local_cxy ); 786 process_free( process ); 787 cluster_pid_release( new_pid ); 788 return -1; 789 } 790 791 process_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 792 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 793 794 // update parent process GPT to set Copy_On_Write for shared data vsegs 795 // this includes all replicated GPT copies 796 if( parent_process_cxy == local_cxy ) // reference is local 797 { 798 vmm_set_cow( parent_process_ptr ); 799 } 800 else // reference is remote 801 { 802 rpc_vmm_set_cow_client( parent_process_cxy, 803 parent_process_ptr ); 804 } 805 806 process_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle 
%d\n", 807 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 808 809 // update children list in parent process 810 xlist_add_last( XPTR( parent_process_cxy , &parent_process_ptr->children_root ), 811 XPTR( local_cxy , &process->brothers_list ) ); 812 hal_remote_atomic_add( XPTR( parent_process_cxy, 813 &parent_process_ptr->children_nr), 1 ); 814 815 // vmm_display( process , true ); 816 // vmm_display( parent_process_ptr , true ); 817 // sched_display( 0 ); 818 819 // return success 820 *child_thread = thread; 821 *child_pid = new_pid; 822 823 return 0; 824 825 } // end process_make_fork() 826 690 827 ///////////////////////////////////////////////////// 691 828 error_t process_make_exec( exec_info_t * exec_info ) 692 829 { 693 char * path; // pathname to .elf file 694 bool_t keep_pid; // new process keep parent PID if true 695 process_t * process; // local pointer on new process 696 pid_t pid; // new process pid 697 xptr_t parent_xp; // extended pointer on parent process 698 cxy_t parent_cxy; // parent process local cluster 699 process_t * parent_ptr; // local pointer on parent process 700 uint32_t parent_pid; // parent process identifier 701 thread_t * thread; // pointer on new thread 702 pthread_attr_t attr; // main thread attributes 703 core_t * core; // pointer on selected core 704 lid_t lid; // selected core local index 830 char * path; // pathname to .elf file 831 process_t * old; // local pointer on old process 832 process_t * new; // local pointer on new process 833 pid_t pid; // old process identifier 834 thread_t * thread; // pointer on new thread 835 pthread_attr_t attr; // main thread attributes 836 lid_t lid; // selected core local index 705 837 error_t error; 706 838 707 // get .elf pathname, parent_xp, and keep_pid flag from exec_info 708 path = exec_info->path; 709 parent_xp = exec_info->parent_xp; 710 keep_pid = exec_info->keep_pid; 711 712 process_dmsg("\n[DBG] %s : core[%x,%d] enters for path = %s\n", 713 __FUNCTION__, 
local_cxy, CURRENT_THREAD->core->lid, path ); 714 715 // get parent process cluster and local pointer 716 parent_cxy = GET_CXY( parent_xp ); 717 parent_ptr = (process_t *)GET_PTR( parent_xp ); 718 parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) ); 719 720 // allocates memory for process descriptor 721 process = process_alloc(); 722 if( process == NULL ) return -1; 723 724 // get PID 725 if( keep_pid ) // keep parent PID 726 { 727 pid = parent_pid; 728 } 729 else // get new PID from local cluster 730 { 731 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ); 732 if( error ) return -1; 733 } 734 735 process_dmsg("\n[DBG] %s : core[%x,%d] created process %x for path = %s\n", 839 // get .elf pathname and PID from exec_info 840 path = exec_info->path; 841 pid = exec_info->pid; 842 843 // check local cluster is old process owner 844 assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__, 845 "local cluster %x is not owner for process %x\n", local_cxy, pid ); 846 847 exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n", 848 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , path ); 849 850 // get old process local pointer 851 old = (process_t *)cluster_get_local_process_from_pid( pid ); 852 853 assert( (old != NULL ) , __FUNCTION__ , 854 "process %x not found in cluster %x\n", pid , local_cxy ); 855 856 // allocate memory for new process descriptor 857 new = process_alloc(); 858 859 // initialize new process descriptor 860 process_reference_init( new, 861 old->pid, // same as old 862 old->ppid, // same as old 863 XPTR( local_cxy , old ) ); 864 865 exec_dmsg("\n[DBG] %s : core[%x,%d] created new process %x / path = %s\n", 736 866 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 737 867 738 // initialize the process descriptor as the reference 739 process_reference_init( process , pid , parent_xp ); 740 741 process_dmsg("\n[DBG] %s : core[%x,%d] initialized process %x / path = %s\n", 742 
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 743 744 // initialize vfs_root and vfs_cwd from parent process 745 xptr_t vfs_root_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_root_xp ) ); 746 vfs_file_count_up( vfs_root_xp ); 747 process->vfs_root_xp = vfs_root_xp; 748 749 xptr_t vfs_cwd_xp = hal_remote_lwd( XPTR( parent_cxy , &parent_ptr->vfs_cwd_xp ) ); 750 vfs_file_count_up( vfs_cwd_xp ); 751 process->vfs_cwd_xp = vfs_cwd_xp; 752 753 // initialize embedded fd_array from parent process 754 process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ), 755 XPTR( parent_cxy , &parent_ptr->fd_array) ); 756 757 process_dmsg("\n[DBG] %s : core[%x,%d] copied fd_array for process %x\n", 758 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid ); 759 760 // register "code" and "data" vsegs as well as the process entry-point in VMM, 761 // using information contained in the elf file. 762 error = elf_load_process( path , process ); 763 764 if( error ) 868 // register "code" and "data" vsegs as well as entry-point 869 // in new process VMM, using information contained in the elf file. 
870 if( elf_load_process( path , new ) ) 765 871 { 766 872 printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n", 767 768 process_destroy( process);769 return error;873 __FUNCTION__, pid , path ); 874 process_destroy( new ); 875 return -1; 770 876 } 771 877 772 process_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs forprocess %x / path = %s\n",878 exec_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs / process %x / path = %s\n", 773 879 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path ); 774 880 775 // select a core in cluster881 // select a core in local cluster to execute the main thread 776 882 lid = cluster_select_local_core(); 777 core = &LOCAL_CLUSTER->core_tbl[lid];778 883 779 884 // initialize pthread attributes for main thread … … 784 889 // create and initialize thread descriptor 785 890 error = thread_user_create( pid, 786 (void *) process->vmm.entry_point,891 (void *)new->vmm.entry_point, 787 892 exec_info->args_pointers, 788 893 &attr, … … 792 897 printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n", 793 898 __FUNCTION__, pid , path ); 794 process_destroy( process);795 return error;899 process_destroy( new ); 900 return -1; 796 901 } 797 902 798 process_dmsg("\n[DBG] %s : core[%x,%d] created thread %x for process %x / path = %s\n", 799 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid, path ); 800 801 // update children list in parent process 802 xlist_add_last( XPTR( parent_cxy , &parent_ptr->children_root ), 803 XPTR( local_cxy , &process->brothers_list ) ); 804 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr) , 1 ); 903 exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x for new process %x\n", 904 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid ); 905 906 // update children list (rooted in parent process) 907 xlist_replace( XPTR( local_cxy , &old->brothers_list ) , 908 XPTR( local_cxy , 
&new->brothers_list ) ); 909 910 // FIXME request destruction of old process copies and threads in all clusters 805 911 806 912 // activate new thread 807 913 thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL ); 808 914 809 process_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",915 exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n", 810 916 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path ); 811 917 … … 818 924 { 819 925 exec_info_t exec_info; // structure to be passed to process_make_exec() 820 xptr_t parent_xp; // extended pointer on parent process. 926 process_t * process; // local pointer on process_init descriptor 927 pid_t pid; // process_init identifier 821 928 error_t error; 822 929 … … 824 931 __FUNCTION__ , local_cxy ); 825 932 826 // parent process is local kernel process 827 parent_xp = XPTR( local_cxy , &process_zero ); 933 // allocates memory for process descriptor from local cluster 934 process = process_alloc(); 935 if( process == NULL ) 936 { 937 printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n", 938 __FUNCTION__, local_cxy ); 939 } 940 941 // get new PID from local cluster 942 error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid ); 943 if( error ) 944 { 945 printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n", 946 __FUNCTION__, local_cxy ); 947 } 948 949 // initialise the process descriptor (parent is local kernel process) 950 process_reference_init( process, 951 pid, 952 process_zero.pid, 953 XPTR( local_cxy , &process_zero ) ); 828 954 829 955 // initialize the exec_info structure 830 exec_info.keep_pid = false; 831 exec_info.parent_xp = parent_xp; 832 strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH ); 956 exec_info.pid = pid; 833 957 exec_info.args_nr = 0; 834 958 exec_info.envs_nr = 0; 835 836 // initialize process_init and create thread_init 959 strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH ); 960 961 // update process descriptor and create thread 
descriptor 837 962 error = process_make_exec( &exec_info ); 838 963 839 if( error ) panic("cannot initialize process_init in cluster %x", local_cxy ); 964 if( error ) 965 { 966 printk("\n[PANIC] in %s : cannot exec %s in cluster %x\n", 967 __FUNCTION__, CONFIG_PROCESS_INIT_PATH , local_cxy ); 968 } 840 969 841 970 process_dmsg("\n[DBG] %s : exit in cluster %x\n", -
trunk/kernel/kern/process.h
r407 r408 137 137 /********************************************************************************************* 138 138 * This structure defines the information required by the process_make_exec() function 139 * to create a new reference process descriptor, and the associated main thread. 139 * to create a new reference process descriptor, and the associated main thread, 140 * in the parent process owner cluster. 140 141 ********************************************************************************************/ 141 142 142 143 typedef struct exec_info_s 143 144 { 144 xptr_t parent_xp; /*! extended pointer on parent process descriptor */ 145 bool_t keep_pid; /*! keep parent PID if true / new PID if false */ 145 pid_t pid; /*! process identifier (both parent and child) */ 146 146 147 147 char path[CONFIG_VFS_MAX_PATH_LENGTH]; /*! .elf file path */ … … 187 187 188 188 /********************************************************************************************* 189 * This function initializes a new process descriptor, in the reference cluster. 190 * The PID value must have been defined previously by the owner cluster manager. 191 * The reference cluster can be different from the owner cluster. 192 * It set the pid / ppid / ref_xp fields. 193 * It registers this process descriptor in three lists: 194 * - the children_list in the parent reference process descriptor. 195 * - the local_list, rooted in the reference cluster manager. 196 * - the copies_list, rooted in the owner cluster manager. 197 * It resets the embedded structures such as the VMM or the file descriptor array. 198 ********************************************************************************************* 199 * @ process : [in] pointer on process descriptor to initialize. 200 * @ pid : [in] process identifier defined by owner cluster. 201 * @ parent_xp : [in] extended pointer on parent process. 
189 * This function initializes, in each cluster, the kernel "process_zero", that is the owner 190 * of all kernel threads in a given cluster. It is called by the kernel_init() function. 191 * Both the PID and PPID fields are set to zero, and the ref_xp is the local process_zero. 192 * The th_tbl[] is initialized as empty. 193 ********************************************************************************************* 194 * @ process : [in] pointer on local process descriptor to initialize. 195 ********************************************************************************************/ 196 void process_zero_init( process_t * process ); 197 198 /********************************************************************************************* 199 * This function initializes a local, reference user process descriptor from another process 200 * descriptor, defined by the <model_xp> argument. The <process> descriptor, the <pid>, and 201 * the <ppid> arguments must be previously defined by the caller. 202 * It can be called by three functions, depending on the process type: 203 * 1) if "process" is the user "process_init", the parent is the kernel process. It is 204 * called once, by the process_init_create() function in cluster[xmax-1][ymax-1]. 205 * 2) if the caller is the process_make_fork() function, the model is generally a remote 206 * process, that is also the parent process. 207 * 3) if the caller is the process_make_exec() function, the model is always a local process, 208 * but the parent is the parent of the model process. 209 * 210 * The following fields are initialised (for all processes but process_zero). 211 * - It sets the pid / ppid / ref_xp fields. 212 * - It initializes an empty VMM (no vsegs registered in VSL and GPT). 213 * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR. 214 * - It sets the root_xp, bin_xp, cwd_xp fields. 215 * - It resets the children list as empty, but does NOT register it in parent children list. 
216 * - It reset the TH_TBL list of threads as empty. 217 * - It reset the semaphore / mutex / barrier / condvar lists as empty. 218 * - It registers the process in the local_list, rooted in the local cluster manager. 219 * - It registers the process in the copies_list, rooted in the owner cluster manager. 220 * - It registers the process extended pointer in the local pref_tbl[] array. 221 ********************************************************************************************* 222 * @ process : [in] pointer on local process descriptor to initialize. 223 * @ pid : [in] process identifier. 224 * @ ppid : [in] parent process identifier. 225 * @ model_xp : [in] extended pointer on model process descriptor (local or remote). 202 226 ********************************************************************************************/ 203 227 void process_reference_init( process_t * process, 204 228 pid_t pid, 205 xptr_t parent_xp ); 229 pid_t ppid, 230 xptr_t model_xp ); 206 231 207 232 /********************************************************************************************* … … 249 274 250 275 /********************************************************************************************* 251 * This function allocates memory and initializes a new user process descriptor, 252 * and the associated main thread, from information found in the <exec_info> structure 253 * (defined in the process.h file), that must be built by the caller. 254 * - If the <keep_pid> field is true, the new process inherits its PID from the parent PID. 255 * - If the <keep_pid> field is false, a new PID is allocated from the local cluster manager. 256 * The new process inherits from the parent process (i) the open file descriptors, (ii) the 257 * vfs_root and the vfs_cwd inodes. 258 * It accesses the .elf file to get the size of the code and data segments, and initializes 259 * the vsegs list in the VMM. 260 * It is executed in the local cluster, that becomes both "owner" and "reference". 
261 * - It can be called by the process_init_create() function to build the "init" process. 262 * - It can be called directly by the sys_exec() function in case of local exec. 263 * - It can be called through the rpc_process_exec_server() function in case of remote exec. 276 * This function implements the exec() system call, and is called by the sys_exec() function. 277 * It is also called by the process_init_create() function to build the "init" process. 278 * The "new" process keeps the "old" process PID and PPID, all open files, and env variables, 279 * the vfs_root and vfs_cwd, but builds a brand new memory image (new VMM from the new .elf). 280 * It actually creates a "new" reference process descriptor, saves all relevant information 281 * from the "old" reference process descriptor to the "new" process descriptor. 282 * It completes the "new" process descriptor, from information found in the <exec_info> 283 * structure (defined in the process.h file), that must be built by the caller. 284 * It creates and initializes the associated main thread. It finally destroys all copies 285 * of the "old" process in all clusters, and all the old associated threads. 286 * It is executed in the local cluster, that becomes both the "owner" and the "reference" 287 * cluster for the "new" process. 264 288 ********************************************************************************************* 265 289 * @ exec_info : [in] pointer on the exec_info structure. … … 268 292 error_t process_make_exec( exec_info_t * exec_info ); 269 293 294 /********************************************************************************************* 295 * This function implements the fork() system call, and is called by the sys_fork() function. 296 * It allocates memory and initializes a new "child" process descriptor, and the 297 * associated "child" thread descriptor in the local cluster.
This function can involve 298 * up to three different clusters : 299 * - the local (child) cluster can be any cluster defined by the sys_fork function. 300 * - the parent cluster must be the reference cluster for the parent process. 301 * - the client cluster containing the thread requesting the fork can be any cluster. 302 * The new "child" process descriptor is initialised from information found in the "parent" 303 * reference process descriptor, containing the complete process description. 304 * The new "child" thread descriptor is initialised from information found in the "parent" 305 * thread descriptor. 306 ********************************************************************************************* 307 * @ parent_process_xp : extended pointer on the reference parent process. 308 * @ parent_thread_xp : extended pointer on the parent thread requesting the fork. 309 * @ child_pid : [out] child process identifier. 310 * @ child_thread_ptr : [out] local pointer on child thread in target cluster. 311 * @ return 0 if success / return non-zero if error. 312 ********************************************************************************************/ 313 error_t process_make_fork( xptr_t parent_process_xp, 314 xptr_t parent_thread_xp, 315 pid_t * child_pid, 316 struct thread_s ** child_thread_ptr ); 270 317 271 318 /******************** File Management Operations ****************************/ -
trunk/kernel/kern/rpc.c
r407 r408 49 49 { 50 50 &rpc_pmem_get_pages_server, // 0 51 &rpc_process_ pid_alloc_server, // 152 &rpc_process_ exec_server,// 251 &rpc_process_make_exec_server, // 1 52 &rpc_process_make_fork_server, // 2 53 53 &rpc_process_kill_server, // 3 54 54 &rpc_thread_user_create_server, // 4 … … 78 78 &rpc_vmm_create_vseg_server, // 26 79 79 &rpc_sched_display_server, // 27 80 &rpc_ undefined,// 2880 &rpc_vmm_set_cow_server, // 28 81 81 &rpc_undefined, // 29 82 82 }; … … 148 148 149 149 ///////////////////////////////////////////////////////////////////////////////////////// 150 // [1] Marshaling functions attached to RPC_PROCESS_PID_ALLOC 151 ///////////////////////////////////////////////////////////////////////////////////////// 152 153 ////////////////////////////////////////////////// 154 void rpc_process_pid_alloc_client( cxy_t cxy, 155 process_t * process, // in 156 error_t * error, // out 157 pid_t * pid ) // out 158 { 159 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 160 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 161 CURRENT_THREAD->core->lid , hal_time_stamp() ); 162 163 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 164 165 // initialise RPC descriptor header 166 rpc_desc_t rpc; 167 rpc.index = RPC_PROCESS_PID_ALLOC; 168 rpc.response = 1; 169 170 // set input arguments in RPC descriptor 171 rpc.args[0] = (uint64_t)(intptr_t)process; 172 173 // register RPC request in remote RPC fifo (blocking function) 174 rpc_send_sync( cxy , &rpc ); 175 176 // get output arguments RPC descriptor 177 *pid = (pid_t)rpc.args[1]; 178 *error = (error_t)rpc.args[2]; 179 180 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 181 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 182 CURRENT_THREAD->core->lid , hal_time_stamp() ); 183 } 184 185 ////////////////////////////////////////////// 186 void rpc_process_pid_alloc_server( xptr_t xp ) 187 { 188 process_t * process; // input : client process 
descriptor 189 error_t error; // output : error status 190 pid_t pid; // output : process identifier 191 192 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 193 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 194 CURRENT_THREAD->core->lid , hal_time_stamp() ); 195 196 // get client cluster identifier and pointer on RPC descriptor 197 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 198 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 199 200 // get input argument from client RPC descriptor 201 process = (process_t*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 202 203 // call local pid allocator 204 xptr_t xp_process = XPTR( client_cxy , process ); 205 error = cluster_pid_alloc( xp_process , &pid ); 206 207 // set output arguments into client RPC descriptor 208 hal_remote_sw( XPTR( client_cxy , &desc->args[0] ) , (uint64_t)error ); 209 hal_remote_sw( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)pid ); 210 211 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 212 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 213 CURRENT_THREAD->core->lid , hal_time_stamp() ); 214 } 215 216 217 ///////////////////////////////////////////////////////////////////////////////////////// 218 // [2] Marshaling functions attached to RPC_PROCESS_EXEC 219 ///////////////////////////////////////////////////////////////////////////////////////// 220 221 //////////////////////////////////////////////// 222 void rpc_process_exec_client( cxy_t cxy, 223 exec_info_t * info, // in 224 error_t * error ) // out 225 { 226 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 227 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 228 CURRENT_THREAD->core->lid , hal_time_stamp() ); 229 230 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 231 232 // initialise RPC descriptor header 233 rpc_desc_t rpc; 234 rpc.index = RPC_PROCESS_EXEC; 150 // [1] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC 
151 ///////////////////////////////////////////////////////////////////////////////////////// 152 153 ///////////////////////////////////////////////////// 154 void rpc_process_make_exec_client( cxy_t cxy, 155 exec_info_t * info, // in 156 error_t * error ) // out 157 { 158 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 159 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 160 CURRENT_THREAD->core->lid , hal_time_stamp() ); 161 162 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 163 164 // initialise RPC descriptor header 165 rpc_desc_t rpc; 166 rpc.index = RPC_PROCESS_MAKE_EXEC; 235 167 rpc.response = 1; 236 168 … … 249 181 } 250 182 251 ///////////////////////////////////////// 252 void rpc_process_ exec_server( xptr_t xp )183 ////////////////////////////////////////////// 184 void rpc_process_make_exec_server( xptr_t xp ) 253 185 { 254 186 exec_info_t * ptr; // local pointer on remote exec_info structure … … 283 215 } 284 216 217 ///////////////////////////////////////////////////////////////////////////////////////// 218 // [2] Marshaling functions attached to RPC_PROCESS_MAKE_FORK 219 ///////////////////////////////////////////////////////////////////////////////////////// 220 221 /////////////////////////////////////////////////// 222 void rpc_process_make_fork_client( cxy_t cxy, 223 xptr_t ref_process_xp, // in 224 xptr_t parent_thread_xp, // in 225 pid_t * child_pid, // out 226 thread_t ** child_thread_ptr, // out 227 error_t * error ) // out 228 { 229 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 230 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 231 CURRENT_THREAD->core->lid , hal_time_stamp() ); 232 233 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 234 235 // initialise RPC descriptor header 236 rpc_desc_t rpc; 237 rpc.index = RPC_PROCESS_MAKE_FORK; 238 rpc.response = 1; 239 240 // set input arguments in RPC descriptor 241 rpc.args[0] = 
(uint64_t)(intptr_t)ref_process_xp; 242 rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp; 243 244 // register RPC request in remote RPC fifo (blocking function) 245 rpc_send_sync( cxy , &rpc ); 246 247 // get output arguments from RPC descriptor 248 *child_pid = (pid_t)rpc.args[2]; 249 *child_thread_ptr = (thread_t *)(intptr_t)rpc.args[3]; 250 *error = (error_t)rpc.args[4]; 251 252 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 253 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 254 CURRENT_THREAD->core->lid , hal_time_stamp() ); 255 } 256 257 ////////////////////////////////////////////// 258 void rpc_process_make_fork_server( xptr_t xp ) 259 { 260 xptr_t ref_process_xp; // extended pointer on reference parent process 261 xptr_t parent_thread_xp; // extended pointer on parent thread 262 pid_t child_pid; // child process identifier 263 thread_t * child_thread_ptr; // local pointer on child thread descriptor 264 error_t error; // local error status 265 266 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 267 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 268 CURRENT_THREAD->core->lid , hal_time_stamp() ); 269 270 // get client cluster identifier and pointer on RPC descriptor 271 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 272 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 273 274 // get input arguments from client RPC descriptor 275 ref_process_xp = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 276 parent_thread_xp = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 277 278 // call local kernel function 279 error = process_make_fork( ref_process_xp, 280 parent_thread_xp, 281 &child_pid, 282 &child_thread_ptr ); 283 284 // set output argument into client RPC descriptor 285 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)child_pid ); 286 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)(intptr_t)child_thread_ptr ); 287 hal_remote_swd( XPTR( client_cxy , 
&desc->args[4] ) , (uint64_t)error ); 288 289 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 290 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 291 CURRENT_THREAD->core->lid , hal_time_stamp() ); 292 } 285 293 286 294 ///////////////////////////////////////////////////////////////////////////////////////// … … 1800 1808 } 1801 1809 1810 ///////////////////////////////////////////////////////////////////////////////////////// 1811 // [28] Marshaling functions attached to RPC_VMM_SET_COW 1812 ///////////////////////////////////////////////////////////////////////////////////////// 1813 1814 ///////////////////////////////////////////// 1815 void rpc_vmm_set_cow_client( cxy_t cxy, 1816 process_t * process ) 1817 { 1818 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1819 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1820 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1821 1822 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1823 1824 // initialise RPC descriptor header 1825 rpc_desc_t rpc; 1826 rpc.index = RPC_VMM_SET_COW; 1827 rpc.response = 1; 1828 1829 // set input arguments in RPC descriptor 1830 rpc.args[0] = (uint64_t)(intptr_t)process; 1831 1832 // register RPC request in remote RPC fifo (blocking function) 1833 rpc_send_sync( cxy , &rpc ); 1834 1835 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1836 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1837 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1838 } 1839 1840 //////////////////////////////////////// 1841 void rpc_vmm_set_cow_server( xptr_t xp ) 1842 { 1843 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 1844 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1845 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1846 1847 process_t * process; 1848 1849 // get client cluster identifier and pointer on RPC descriptor 1850 cxy_t cxy = (cxy_t)GET_CXY( xp ); 1851 rpc_desc_t * desc 
= (rpc_desc_t *)GET_PTR( xp ); 1852 1853 // get input arguments from client RPC descriptor 1854 process = (process_t *)(intptr_t)hal_remote_lpt( XPTR(cxy , &desc->args[0])); 1855 1856 // call local kernel function 1857 vmm_set_cow( process ); 1858 1859 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1860 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1861 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1862 } 1863 1802 1864 /***************************************************************************************/ 1803 1865 /************ Generic functions supporting RPCs : client side **************************/ … … 1835 1897 __FUNCTION__ , local_cxy , server_cxy ); 1836 1898 1837 if( thread_can_yield() ) sched_yield( );1899 if( thread_can_yield() ) sched_yield("RPC fifo full"); 1838 1900 } 1839 1901 } … … 1872 1934 1873 1935 thread_block( this , THREAD_BLOCKED_RPC ); 1874 sched_yield( );1936 sched_yield("client blocked on RPC"); 1875 1937 1876 1938 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n", … … 1959 2021 1960 2022 // interrupted thread deschedule always 1961 sched_yield( );2023 sched_yield("IPI received"); 1962 2024 1963 2025 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n", … … 2079 2141 2080 2142 // deschedule without blocking 2081 sched_yield( );2143 sched_yield("RPC fifo empty or too much work"); 2082 2144 2083 2145 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n", … … 2089 2151 2090 2152 2091 2092 2093 2094 2095 2096 2097 2098 /* deprecated [AG] 29/09/20172099 2100 ////////////////////////////////////////////////////2101 error_t rpc_activate_thread( remote_fifo_t * rpc_fifo )2102 {2103 core_t * core;2104 thread_t * thread;2105 thread_t * this;2106 scheduler_t * sched;2107 error_t error;2108 bool_t found;2109 reg_t sr_save;2110 2111 2112 this = CURRENT_THREAD;2113 core = this->core;2114 sched = &core->scheduler;2115 found = false;2116 2117 
assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,2118 "calling thread is not RPC_FIFO owner\n" );2119 2120 // makes the calling thread not preemptable2121 // during activation / creation of the RPC thread2122 hal_disable_irq( &sr_save );2123 2124 grpc_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",2125 __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );2126 2127 // search one non blocked RPC thread2128 list_entry_t * iter;2129 LIST_FOREACH( &sched->k_root , iter )2130 {2131 thread = LIST_ELEMENT( iter , thread_t , sched_list );2132 if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )2133 {2134 found = true;2135 break;2136 }2137 }2138 2139 if( found == false ) // create new RPC thread2140 {2141 error = thread_kernel_create( &thread,2142 THREAD_RPC,2143 &rpc_thread_func,2144 NULL,2145 core->lid );2146 if( error )2147 {2148 hal_restore_irq( sr_save );2149 printk("\n[ERROR] in %s : no memory for new RPC thread in cluster %x\n",2150 __FUNCTION__ , local_cxy );2151 return ENOMEM;2152 }2153 2154 // unblock thread2155 thread->blocked = 0;2156 2157 // update core descriptor counter2158 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );2159 2160 grpc_dmsg("\n[DBG] %s : core [%x,%d] creates RPC thread %x at cycle %d\n",2161 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );2162 2163 }2164 else // create a new RPC thread2165 {2166 2167 grpc_dmsg("\n[DBG] %s : core[%x,%d] activates RPC thread %x at cycle %d\n",2168 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );2169 2170 }2171 2172 // update rpc_fifo owner2173 rpc_fifo->owner = thread->trdid;2174 2175 // current thread deschedule2176 sched_yield();2177 2178 // restore IRQs for the calling thread2179 hal_restore_irq( sr_save );2180 2181 // return success2182 return 0;2183 2184 } // end rpc_activate_thread()2185 2186 ////////////////2187 void rpc_check()2188 {2189 thread_t * this = CURRENT_THREAD;2190 remote_fifo_t * rpc_fifo = 
&LOCAL_CLUSTER->rpc_fifo;2191 error_t error;2192 2193 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / enter at cycle %d\n",2194 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2195 2196 // calling thread does nothing if light lock already taken or FIFO empty2197 if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) )2198 {2199 2200 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",2201 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2202 2203 return;2204 }2205 2206 // try to take the light lock, and activates an RPC thread if success2207 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )2208 {2209 error = rpc_activate_thread( rpc_fifo );2210 2211 if( error ) // cannot activate an RPC_THREAD2212 {2213 rpc_fifo->owner = 0;2214 2215 printk("\n[ERROR] in %s : no memory to create a RPC thread for core %d"2216 " in cluster %x => do nothing\n",2217 __FUNCTION__ , CURRENT_CORE->lid , local_cxy );2218 }2219 2220 return;2221 }2222 else // light lock taken by another thread2223 {2224 2225 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",2226 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );2227 2228 return;2229 }2230 } // end rpc_check()2231 2232 2233 //////////////////////2234 void rpc_thread_func()2235 {2236 // makes the RPC thread not preemptable2237 hal_disable_irq( NULL );2238 2239 thread_t * this = CURRENT_THREAD;2240 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;2241 2242 while(1)2243 {2244 // check fifo ownership (ownership should be given by rpc_activate()2245 assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,2246 "thread %x on core[%x,%d] not owner of RPC_FIFO / owner = %x\n",2247 this->trdid, local_cxy, this->core->lid , rpc_fifo->owner );2248 2249 // executes pending RPC(s)2250 rpc_execute_all( rpc_fifo );2251 2252 // release rpc_fifo ownership if required2253 // (this 
ownership can be lost during RPC execution)2254 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;2255 2256 // deschedule or sucide2257 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) // suicide2258 {2259 2260 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / suicide at cycle %d\n",2261 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2262 2263 // update core descriptor counter2264 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );2265 2266 // suicide2267 thread_exit();2268 }2269 else // deschedule2270 {2271 2272 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / deschedule at cycle %d\n",2273 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2274 2275 sched_yield();2276 2277 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / wake up at cycle %d\n",2278 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );2279 2280 }2281 } // end while2282 } // end rpc_thread_func()2283 2284 */2285 2286 -
trunk/kernel/kern/rpc.h
r407 r408 60 60 { 61 61 RPC_PMEM_GET_PAGES = 0, 62 RPC_PROCESS_ PID_ALLOC = 1,63 RPC_PROCESS_ EXEC= 2,62 RPC_PROCESS_MAKE_EXEC = 1, 63 RPC_PROCESS_MAKE_FORK = 2, 64 64 RPC_PROCESS_KILL = 3, 65 65 RPC_THREAD_USER_CREATE = 4, … … 85 85 RPC_VMM_CREATE_VSEG = 26, 86 86 RPC_SCHED_DISPLAY = 27, 87 87 RPC_VMM_SET_COW = 28, 88 88 RPC_MAX_INDEX = 30, 89 89 } … … 186 186 187 187 /*********************************************************************************** 188 * [1] The RPC_PROCESS_PID_ALLOC allocates one new PID in a remote cluster, registers 189 * the new process in the remote cluster, and returns the PID, and an error code. 188 * [1] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 189 * process descriptor in a remote server cluster. This server cluster must be 190 * the owner cluster for the existing process. The new process descriptor is 191 * initialized from informations found in the <exec_info> structure. 192 * A new main thread descriptor is created in the server cluster. 193 * All copies of the old process descriptor and all old threads are destroyed. 190 194 *********************************************************************************** 191 195 * @ cxy : server cluster identifier. 192 * @ process : [in] local pointer on process descriptorin client cluster.196 * @ process : [in] local pointer on the exec_info structure in client cluster. 193 197 * @ error : [out] error status (0 if success). 194 * @ pid : [out] new process identifier. 
195 **********************************************************************************/ 196 void rpc_process_pid_alloc_client( cxy_t cxy, 197 struct process_s * process, 198 error_t * error, 199 pid_t * pid ); 200 201 void rpc_process_pid_alloc_server( xptr_t xp ); 202 203 /*********************************************************************************** 204 * [2] The RPC_PROCESS_EXEC creates a process descriptor copy, in a remote cluster 205 * and initializes if from information found in the reference process descriptor. 206 * This remote cluster becomes the new reference cluster. 207 *********************************************************************************** 208 * @ cxy : server cluster identifier. 209 * @ info : [in] pointer on local exec_info structure. 210 * @ error : [out] error status (0 if success). 211 **********************************************************************************/ 212 void rpc_process_exec_client( cxy_t cxy, 213 struct exec_info_s * info, 214 error_t * error ); 215 216 void rpc_process_exec_server( xptr_t xp ); 198 **********************************************************************************/ 199 void rpc_process_make_exec_client( cxy_t cxy, 200 struct exec_info_s * info, 201 error_t * error ); 202 203 void rpc_process_make_exec_server( xptr_t xp ); 204 205 /*********************************************************************************** 206 * [2] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the 207 * associated "child" thread descriptor in a target remote cluster that can be 208 * any cluster. The child process is initialized from informations found in the 209 * "parent" process descriptor (that must be the parent reference cluster), 210 * and from the "parent" thread descriptor that can be in any cluster. 211 *********************************************************************************** 212 * @ cxy : server cluster identifier. 
213 * @ ref_process_xp : [in] extended pointer on reference parent process. 214 * @ parent_thread_xp : [in] extended pointer on parent thread. 215 * @ child_pid : [out] child process identifier. 216 * @ child_thread_ptr : [out] local pointer on child thread. 217 * @ error : [out] error status (0 if success). 218 **********************************************************************************/ 219 void rpc_process_make_fork_client( cxy_t cxy, 220 xptr_t ref_process_xp, 221 xptr_t parent_thread_xp, 222 pid_t * child_pid, 223 struct thread_s ** child_thread_ptr, 224 error_t * error ); 225 226 void rpc_process_make_fork_server( xptr_t xp ); 217 227 218 228 /*********************************************************************************** … … 613 623 void rpc_sched_display_server( xptr_t xp ); 614 624 625 /*********************************************************************************** 626 * [28] The RPC_VMM_SET_COW allows a client thread to request the remote reference 627 * cluster to set the COW flag and reset the WRITABLE flag of all GPT entries for 628 * the DATA, MMAP and REMOTE vsegs of process identified by the <process> argument. 629 630 * of a remote scheduler, identified by the <lid> argument. 631 *********************************************************************************** 632 * @ cxy : server cluster identifier. 633 * @ process : [in] local pointer on reference process descriptor. 634 **********************************************************************************/ 635 void rpc_vmm_set_cow_client( cxy_t cxy, 636 struct process_s * process ); 637 638 void rpc_vmm_set_cow_server( xptr_t xp ); 639 615 640 #endif -
trunk/kernel/kern/scheduler.c
r407 r408 128 128 } // end sched_remove() 129 129 130 //////////////////////////////////////// 131 thread_t * sched_select( core_t * core)132 { 133 thread_t * thread;134 135 scheduler_t * sched = &core->scheduler;130 ////////////////////////////////////////////// 131 thread_t * sched_select( scheduler_t * sched ) 132 { 133 thread_t * thread; 134 list_entry_t * current; 135 list_entry_t * last; 136 136 137 137 // take lock protecting sheduler lists 138 138 spinlock_lock( &sched->lock ); 139 140 list_entry_t * current;141 list_entry_t * last;142 139 143 140 // first loop : scan the kernel threads list if not empty … … 172 169 break; 173 170 174 default: // DEV thread if non blocked 175 if( thread->blocked == 0 ) 171 default: // DEV thread if non blocked and waiting queue non empty 172 if( (thread->blocked == 0) && 173 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) 176 174 { 177 175 spinlock_unlock( &sched->lock ); … … 253 251 scheduler_t * sched = &core->scheduler; 254 252 253 // signal_dmsg("\n@@@ %s enter at cycle %d\n", 254 // __FUNCTION__ , hal_time_stamp() ); 255 255 256 // take lock protecting threads lists 256 257 spinlock_lock( &sched->lock ); … … 260 261 { 261 262 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 262 if( thread->signals ) sched_kill_thread( thread ); 263 if( thread->signals ) // sched_kill_thread( thread ); 264 { 265 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 266 __FUNCTION__, thread, thread->signals, hal_time_stamp() ); 267 } 263 268 } 264 269 … … 267 272 { 268 273 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 269 if( thread->signals ) sched_kill_thread( thread ); 274 if( thread->signals ) // sched_kill_thread( thread ); 275 { 276 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 277 __FUNCTION__, thread, thread->signals, hal_time_stamp() ); 278 279 } 270 280 } 271 281 … … 273 283 spinlock_unlock( &sched->lock ); 274 284 285 // signal_dmsg("\n@@@ %s exit at cycle 
%d\n", 286 // __FUNCTION__ , hal_time_stamp() ); 287 275 288 } // end sched_handle_signals() 276 289 277 ////////////////////////////////////// 278 void sched_update( thread_t * current, 279 thread_t * next ) 280 { 281 scheduler_t * sched = ¤t->core->scheduler; 282 283 if( current->type == THREAD_USER ) sched->u_last = ¤t->sched_list; 284 else sched->k_last = ¤t->sched_list; 285 286 sched->current = next; 287 } 288 289 ////////////////// 290 void sched_yield() 290 //////////////////////////////// 291 void sched_yield( char * cause ) 291 292 { 292 293 thread_t * next; 293 294 thread_t * current = CURRENT_THREAD; 295 scheduler_t * sched = ¤t->core->scheduler; 294 296 295 297 #if( CONFIG_SCHED_DEBUG & 0x1 ) … … 304 306 } 305 307 308 // enter critical section / save SR in current thread context 309 hal_disable_irq( ¤t->save_sr ); 310 306 311 // loop on threads to select next thread 307 next = sched_select( current->core);312 next = sched_select( sched ); 308 313 309 314 // check next thread attached to same core as the calling thread … … 319 324 if( next != current ) 320 325 { 321 // current thread desactivate IRQs 322 hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] ); 323 324 sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n", 325 __FUNCTION__, local_cxy, current->core->lid, 326 327 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 328 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 329 __FUNCTION__, local_cxy, current->core->lid, cause, 326 330 current, thread_type_str(current->type), current->process->pid, current->trdid, 327 331 next , thread_type_str(next->type) , next->process->pid , next->trdid, 328 hal_time_stamp() );332 (uint32_t)hal_get_cycles() ); 329 333 330 334 // update scheduler 331 sched_update( current , next ); 335 sched->current = next; 336 if( next->type == THREAD_USER ) sched->u_last = &next->sched_list; 337 else sched->k_last = &next->sched_list; 332 338 333 339 // 
handle FPU ownership … … 340 346 // switch CPU from calling thread context to new thread context 341 347 hal_do_cpu_switch( current->cpu_context, next->cpu_context ); 342 343 // restore IRQs when next thread resume344 hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );345 348 } 346 349 else 347 350 { 348 351 349 sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n", 350 __FUNCTION__, local_cxy, current->core->lid, current->trdid, 351 thread_type_str(current->type) ,hal_time_stamp() ); 352 353 } 352 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 353 " thread %x (%s) (%x,%x) continue / cycle %d\n", 354 __FUNCTION__, local_cxy, current->core->lid, cause, 355 current, thread_type_str(current->type), current->process->pid, current->trdid, 356 (uint32_t)hal_get_cycles() ); 357 358 } 359 360 // exit critical section / restore SR from next thread context 361 hal_restore_irq( next->save_sr ); 362 354 363 } // end sched_yield() 355 364 … … 384 393 385 394 nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n" 386 "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",395 "kernel_threads = %d / user_threads = %d / current = (%x,%x)\n", 387 396 local_cxy , core->lid, hal_time_stamp(), 388 397 sched->k_threads_nr, sched->u_threads_nr, 389 sched->current-> trdid , sched->idle->trdid );398 sched->current->process->pid , sched->current->trdid ); 390 399 391 400 // display kernel threads … … 393 402 { 394 403 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 395 nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n", 396 thread_type_str( thread->type ), thread->trdid, thread->process->pid, 397 thread->entry_func, thread->blocked ); 404 if (thread->type == THREAD_DEV) 405 { 406 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X / %s\n", 407 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 408 thread, thread->blocked , thread->chdev->name ); 409 } 
410 else 411 { 412 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n", 413 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 414 thread, thread->blocked ); 415 } 398 416 } 399 417 … … 402 420 { 403 421 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 404 nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked =%X\n",405 thread_type_str( thread->type ), thread-> trdid, thread->process->pid,406 thread ->entry_func, thread->blocked );422 nolock_printk(" - %s / pid %X / trdid %X / desc %X / blocked %X\n", 423 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 424 thread, thread->blocked ); 407 425 } 408 426 -
trunk/kernel/kern/scheduler.h
r407 r408 74 74 75 75 /********************************************************************************************* 76 * This function handles pending signals for all registered threads, and calls the 77 * sched_select() function to make a context switch for the core running the calling thread. 76 * This function is the only method to make a context switch. It is called in cas of TICK, 77 * or when when a thread explicitely requires a scheduling. 78 * It handles the pending signals for all threads attached to the core running the calling 79 * thread, and calls the sched_select() function to select a new thread. 80 * The cause argument is only used for debug by the sched_display() function, and 81 * indicates the scheduling cause. 82 ********************************************************************************************* 83 * @ cause : character string defining the scheduling cause. 78 84 ********************************************************************************************/ 79 void sched_yield( );85 void sched_yield( char * cause ); 80 86 81 87 /********************************************************************************************* … … 101 107 /********************************************************************************************* 102 108 * This function does NOT modify the scheduler state. 103 * It just select a thread in the list of attached threads, implementing the following policy: 104 * 1) it scan the list of kernel threads, from the next thread after the last executed one, 105 * and returns the first runnable found (can be the current thread). 106 * 2) if no kernel thread found, it scan the list of user thread, from the next thread after 107 * the last executed one, and returns the first runable found (can be the current thread). 108 * 3) if no runable thread found, it returns the idle thread. 
109 * It just select a thread in the list of attached threads, implementing the following 110 * three steps policy: 111 * 1) It scan the list of kernel threads, from the next thread after the last executed one, 112 * and returns the first runnable found : not IDLE, not blocked, client queue not empty. 113 * It can be the current thread. 114 * 2) If no kernel thread found, it scan the list of user thread, from the next thread after 115 * the last executed one, and returns the first runable found : not blocked. 116 * It can be the current thread. 117 * 3) If no runable thread found, it returns the idle thread. 109 118 ********************************************************************************************* 110 * @ core : local pointer on the core descriptor.119 * @ core : local pointer on scheduler. 111 120 * @ returns pointer on selected thread descriptor 112 121 ********************************************************************************************/ 113 struct thread_s * sched_select( struct core_s * core);122 struct thread_s * sched_select( struct scheduler_s * sched ); 114 123 115 124 /********************************************************************************************* -
trunk/kernel/kern/thread.c
r407 r408 116 116 // - thread_user_fork() 117 117 // - thread_kernel_create() 118 // - thread_user_init()119 118 ///////////////////////////////////////////////////////////////////////////////////// 120 119 // @ thread : pointer on thread descriptor … … 200 199 thread->signature = THREAD_SIGNATURE; 201 200 201 // FIXME call hal_thread_init() function to initialise the save_sr field 202 thread->save_sr = 0xFF13; 203 202 204 // update local DQDT 203 205 dqdt_local_update_threads( 1 ); … … 322 324 } 323 325 326 // update DQDT for new thread 327 dqdt_local_update_threads( 1 ); 328 324 329 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / trdid = %x / process %x / core = %d\n", 325 330 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, … … 331 336 } // end thread_user_create() 332 337 333 //////////////////////////////////////////////////// 334 error_t thread_user_fork( process_t * process, 335 intptr_t stack_base, 336 uint32_t stack_size, 337 thread_t ** new_thread ) 338 /////////////////////////////////////////////////////// 339 error_t thread_user_fork( xptr_t parent_thread_xp, 340 process_t * child_process, 341 thread_t ** child_thread ) 338 342 { 339 343 error_t error; 340 thread_t * child; // pointer on new thread descriptor 341 lid_t core_lid; // selected core local index 342 343 thread_dmsg("\n[DBG] %s : core[%x,%d] enters\n", 344 __FUNCTION__ , local_cxy , core_lid ); 344 thread_t * child_ptr; // local pointer on local child thread 345 lid_t core_lid; // selected core local index 346 347 thread_t * parent_ptr; // local pointer on remote parent thread 348 cxy_t parent_cxy; // parent thread cluster 349 process_t * parent_process; // local pointer on parent process 350 xptr_t parent_gpt_xp; // extended pointer on parent thread GPT 351 352 void * func; // parent thread entry_func 353 void * args; // parent thread entry_args 354 intptr_t base; // parent thread u_stack_base 355 uint32_t size; // parent thread u_stack_size 356 uint32_t flags; // parent_thread flags 357 
vpn_t vpn_base; // parent thread stack vpn_base 358 vpn_t vpn_size; // parent thread stack vpn_size 359 reg_t * uzone; // parent thread pointer on uzone 360 361 vseg_t * vseg; // child thread STACK vseg 362 363 thread_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 364 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 345 365 346 366 // select a target core in local cluster 347 367 core_lid = cluster_select_local_core(); 348 368 349 // get pointer on parent thread descriptor 350 thread_t * parent = CURRENT_THREAD; 351 352 // allocate memory for new thread descriptor 353 child = thread_alloc(); 354 355 if( child == NULL ) 369 // get cluster and local pointer on parent thread descriptor 370 parent_cxy = GET_CXY( parent_thread_xp ); 371 parent_ptr = (thread_t *)GET_PTR( parent_thread_xp ); 372 373 // get relevant fields from parent thread 374 func = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func ) ); 375 args = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args ) ); 376 base = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base ) ); 377 size = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size ) ); 378 flags = hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags ) ); 379 uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone ) ); 380 381 vpn_base = base >> CONFIG_PPM_PAGE_SHIFT; 382 vpn_size = size >> CONFIG_PPM_PAGE_SHIFT; 383 384 // get pointer on parent process in parent thread cluster 385 parent_process = (process_t *)hal_remote_lpt( XPTR( parent_cxy, 386 &parent_ptr->process ) ); 387 388 // get extended pointer on parent GPT in parent thread cluster 389 parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt ); 390 391 // allocate memory for child thread descriptor 392 child_ptr = thread_alloc(); 393 if( child_ptr == NULL ) 356 394 { 357 395 printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ ); 358 return ENOMEM;396 
return -1; 359 397 } 360 398 361 399 // initialize thread descriptor 362 error = thread_init( child ,363 process,400 error = thread_init( child_ptr, 401 child_process, 364 402 THREAD_USER, 365 parent->entry_func,366 parent->entry_args,403 func, 404 args, 367 405 core_lid, 368 stack_base, 369 stack_size ); 370 406 base, 407 size ); 371 408 if( error ) 372 409 { 373 printk("\n[ERROR] in %s : cannot initialize newthread\n", __FUNCTION__ );374 thread_release( child );410 printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ ); 411 thread_release( child_ptr ); 375 412 return EINVAL; 376 413 } 377 414 378 415 // return child pointer 379 *new_thread = child; 380 381 // set DETACHED flag if required 382 if( parent->flags & THREAD_FLAG_DETACHED ) child->flags = THREAD_FLAG_DETACHED; 416 *child_thread = child_ptr; 417 418 // set detached flag if required 419 if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED; 420 421 // update uzone pointer in child thread descriptor 422 child_ptr->uzone = (char *)((intptr_t)uzone + 423 (intptr_t)child_ptr - 424 (intptr_t)parent_ptr ); 425 383 426 384 427 // allocate CPU context for child thread 385 if( hal_cpu_context_alloc( child ) )428 if( hal_cpu_context_alloc( child_ptr ) ) 386 429 { 387 430 printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ ); 388 thread_release( child );389 return ENOMEM;431 thread_release( child_ptr ); 432 return -1; 390 433 } 391 434 392 435 // allocate FPU context for child thread 393 if( hal_fpu_context_alloc( child ) )436 if( hal_fpu_context_alloc( child_ptr ) ) 394 437 { 395 438 printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ ); 396 thread_release( child ); 397 return ENOMEM; 398 } 399 400 // copy kernel stack content from parent to child thread descriptor 401 void * dst = (void *)(&child->signature) + 4; 402 void * src = (void *)(&parent->signature) + 4; 403 memcpy( dst , src , parent->k_stack_size ); 439 thread_release( 
child_ptr ); 440 return -1; 441 } 442 443 // create and initialize STACK vseg 444 vseg = vseg_alloc(); 445 vseg_init( vseg, 446 VSEG_TYPE_STACK, 447 base, 448 size, 449 vpn_base, 450 vpn_size, 451 0, 0, XPTR_NULL, // not a file vseg 452 local_cxy ); 453 454 // register STACK vseg in local child VSL 455 vseg_attach( &child_process->vmm , vseg ); 456 457 // copy all valid STACK GPT entries 458 vpn_t vpn; 459 bool_t mapped; 460 ppn_t ppn; 461 for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) 462 { 463 error = hal_gpt_pte_copy( &child_process->vmm.gpt, 464 parent_gpt_xp, 465 vpn, 466 true, // set cow 467 &ppn, 468 &mapped ); 469 if( error ) 470 { 471 vseg_detach( &child_process->vmm , vseg ); 472 vseg_free( vseg ); 473 thread_release( child_ptr ); 474 printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ ); 475 return -1; 476 } 477 478 // increment page descriptor fork_nr for the referenced page if mapped 479 if( mapped ) 480 { 481 xptr_t page_xp = ppm_ppn2page( ppn ); 482 cxy_t page_cxy = GET_CXY( page_xp ); 483 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 484 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 485 486 thread_dmsg("\n[DBG] %s : core[%x,%d] copied PTE to child GPT : vpn %x\n", 487 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 488 489 } 490 } 491 492 // set COW flag for STAK vseg in parent thread GPT 493 hal_gpt_flip_cow( true, // set cow 494 parent_gpt_xp, 495 vpn_base, 496 vpn_size ); 497 498 // update DQDT for child thread 499 dqdt_local_update_threads( 1 ); 404 500 405 501 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n", 406 __FUNCTION__, local_cxy , core_lid , child->trdid ,process->pid );502 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, child_ptr->trdid, child_process->pid ); 407 503 408 504 return 0; … … 452 548 hal_cpu_context_create( thread ); 453 549 550 // update DQDT for kernel thread 551 dqdt_local_update_threads( 1 ); 552 454 553 
thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n", 455 554 __FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() ); … … 511 610 512 611 // update intrumentation values 513 uint32_t pgfaults = thread->info.pgfault_nr; 514 uint32_t u_errors = thread->info.u_err_nr; 515 uint32_t m_errors = thread->info.m_err_nr; 516 517 process->vmm.pgfault_nr += pgfaults; 518 process->vmm.u_err_nr += u_errors; 519 process->vmm.m_err_nr += m_errors; 612 process->vmm.pgfault_nr += thread->info.pgfault_nr; 520 613 521 614 // release memory allocated for CPU context and FPU context … … 635 728 { 636 729 this->flags &= ~THREAD_FLAG_SCHED; 637 sched_yield( );730 sched_yield( "delayed scheduling" ); 638 731 } 639 732 … … 697 790 698 791 // deschedule 699 sched_yield( );792 sched_yield( "exit" ); 700 793 return 0; 701 794 … … 721 814 while( 1 ) 722 815 { 816 // unmask IRQs 817 hal_enable_irq( NULL ); 818 723 819 if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode 724 820 { … … 740 836 741 837 // force scheduling at each iteration 742 sched_yield( );838 sched_yield( "idle" ); 743 839 } 744 840 } // end thread_idle() … … 754 850 /////////////////////////////////////////////////// 755 851 void thread_kernel_time_update( thread_t * thread ) 756 {757 // TODO758 // printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );759 }760 761 ////////////////////////////////////////////////762 void thread_signals_handle( thread_t * thread )763 852 { 764 853 // TODO -
trunk/kernel/kern/thread.h
r407 r408 34 34 #include <spinlock.h> 35 35 #include <core.h> 36 #include <chdev.h> 36 37 #include <cluster.h> 37 38 #include <process.h> … … 96 97 #define THREAD_BLOCKED_RPC 0x0200 /*! thread wait RPC completion */ 97 98 98 #define THREAD_BLOCKED_DEV_QUEUE 0x2000 /*! thread DEV wait queue */99 99 #define THREAD_BLOCKED_DEV_ISR 0x4000 /*! thread DEV wait ISR */ 100 100 … … 132 132 * thread is registered in the local copy of the process descriptor. 133 133 * 134 * WARNING : Don't modify the first 4fields order, as this order is used by the134 * WARNING : Don't modify the first 3 fields order, as this order is used by the 135 135 * hal_kentry assembly code for the TSAR architecture. 136 136 **************************************************************************************/ … … 140 140 typedef struct thread_s 141 141 { 142 void * cpu_context; /*! used for context switch */ 143 void * fpu_context; /*! used for dynamic FPU allocation */ 142 void * cpu_context; /*! pointer on CPU context switch */ 143 void * fpu_context; /*! pointer on FPU context switch */ 144 void * uzone; /*! pointer on uzone for hal_kentry */ 144 145 145 146 intptr_t k_stack_base; /*! kernel stack base address */ … … 172 173 173 174 uint32_t flags; /*! bit vector of flags */ 174 volatile uint32_t blocked; /*! bit vector of blocking causes*/175 volatile uint32_t signals; /*! bit vector of (KILL / SUICIDE) signals*/175 uint32_t signals; /*! bit vector of (KILL / SUICIDE) signals */ 176 uint32_t blocked; /*! bit vector of blocking causes */ 176 177 177 178 error_t errno; /*! errno value set by last system call */ … … 189 190 list_entry_t sched_list; /*! member of threads attached to same core */ 190 191 191 uint32_t dev_channel; /*! device channel for a DEV thread */ 192 chdev_t * chdev; /*! chdev pointer (for a DEV thread only) */ 193 194 reg_t save_sr; /*! used by sched_yield() function */ 192 195 193 196 ioc_command_t ioc_cmd; /*! 
IOC device generic command */ … … 222 225 223 226 /*************************************************************************************** 224 * This function allocates memory for a user thread descriptor in the local cluster,225 * and initializes it from information contained in the arguments.226 * It is used by the "pthread_create" system call.227 * The CPU context is initialized from scratch , and the "loadable" field is set.228 * The new thread is attached to the core specified in the <attr> argument.227 * This function is used by the pthread_create() system call to create a "new" thread 228 * in an existing process. It allocates memory for an user thread descriptor in the 229 * local cluster, and initializes it from information contained in the arguments. 230 * The CPU context is initialized from scratch. If required by the <attr> argument, 231 * the new thread is attached to the core specified in <attr>. 229 232 * It is registered in the local process descriptor specified by the <pid> argument. 230 233 * The thread descriptor pointer is returned to allow the parent thread to register it … … 246 249 247 250 /*************************************************************************************** 248 * This function is used by the fork() system call to create the child process main249 * thread. It allocates memory for an user thread descriptor in the local cluster,250 * and initializes it from information contained in the calling thread descriptor.251 * This function is used by the sys_fork() system call to create the "child" thread 252 * in the local cluster. It allocates memory for a thread descriptor, and initializes 253 * it from the "parent" thread descriptor defined by the <parent_thread_xp> argument. 251 254 * The new thread is attached to the core that has the lowest load in local cluster. 
252 * It is registered in the child process descriptor defined by the <process> argument.255 * It is registered in the "child" process defined by the <child_process> argument. 253 256 * This new thread inherits its user stack from the parent thread, as it uses the 254 257 * Copy-On-Write mechanism to get a private stack when required. … … 256 259 * the Copy-On-Write mechanism cannot be used for kernel segments (because kernel 257 260 * uses physical addressing on some architectures). 258 * The CPU and FPU execution contexts are created and linked to the new thread, 259 * but the actual context copy is NOT done. The THREAD_BLOCKED_GLOBAL bit is set, 260 * and the thread must be explicitely unblocked later to make the new thread runable. 261 *************************************************************************************** 262 * @ process : local pointer on owner process descriptor. 263 * @ stack_base : user stack base address (from parent). 264 * @ stack_size : user stack size (from parent). 265 * @ new_thread : [out] address of buffer for new thread descriptor pointer. 266 * @ returns 0 if success / returns ENOMEM if error. 267 **************************************************************************************/ 268 error_t thread_user_fork( process_t * process, 269 intptr_t stack_base, 270 uint32_t stack_size, 271 thread_t ** new_thread ); 261 * The CPU and FPU execution contexts are created and linked to the new thread. 262 * but the actual context copy is NOT done, and must be done by by the sys_fork(). 263 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start. 264 *************************************************************************************** 265 * @ parent_thread_xp : extended pointer on parent thread descriptor. 266 * @ child_process : local pointer on child process descriptor. 267 * @ child_thread : [out] address of buffer for child thread descriptor pointer. 268 * @ returns 0 if success / returns -1 if error. 
269 **************************************************************************************/ 270 error_t thread_user_fork( xptr_t parent_thread_xp, 271 process_t * child_process, 272 thread_t ** child_thread ); 272 273 273 274 /*************************************************************************************** 274 275 * This function allocates memory for a kernel thread descriptor in the local cluster, 275 * and initializes it from arguments values, calling the thread_kernel_init() function, 276 * that also allocates and initializes the CPU context. 276 * and initializes it from arguments values. 277 * It is called by kernel_init() to statically create all DEV server threads 278 * It is also called to dynamically create RPC threads when required. 277 279 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 278 280 *************************************************************************************** … … 291 293 292 294 /*************************************************************************************** 293 * This function initializes an existing kernelthread descriptor from arguments values.295 * This function initializes an existing thread descriptor from arguments values. 294 296 * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start. 297 * It is called by the kernel_init() function to initialize the IDLE thread. 295 298 *************************************************************************************** 296 299 * @ thread : pointer on existing thread descriptor. -
trunk/kernel/libk/remote_barrier.c
r407 r408 274 274 // block & deschedule the calling thread 275 275 thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC ); 276 sched_yield( );276 sched_yield("blocked on barrier"); 277 277 278 278 // restore interrupts -
trunk/kernel/libk/remote_condvar.c
r407 r408 189 189 // block the calling thread 190 190 thread_block( CURRENT_THREAD , THREAD_BLOCKED_USERSYNC ); 191 sched_yield( );191 sched_yield("blocked on condvar"); 192 192 193 193 // lock the mutex before return -
trunk/kernel/libk/remote_fifo.c
r407 r408 89 89 // - deschedule without blocking if possible 90 90 // - wait ~1000 cycles otherwise 91 if( thread_can_yield() ) sched_yield( );91 if( thread_can_yield() ) sched_yield( "wait RPC fifo" ); 92 92 else hal_fixed_delay( 1000 ); 93 93 -
trunk/kernel/libk/remote_mutex.c
r407 r408 208 208 // block & deschedule the calling thread 209 209 thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC ); 210 sched_yield( );210 sched_yield("blocked on mutex"); 211 211 212 212 // restore interrupts -
trunk/kernel/libk/remote_sem.c
r407 r408 219 219 // block and deschedule 220 220 thread_block( this , THREAD_BLOCKED_SEM ); 221 sched_yield( );221 sched_yield("blocked on semaphore"); 222 222 } 223 223 } // end remote_sem_wait() -
trunk/kernel/libk/remote_spinlock.c
r407 r408 179 179 { 180 180 hal_restore_irq( mode ); 181 if( thread_can_yield() ) sched_yield( );181 if( thread_can_yield() ) sched_yield("waiting spinlock"); 182 182 hal_disable_irq( &mode ); 183 183 continue; -
trunk/kernel/libk/spinlock.c
r407 r408 111 111 { 112 112 hal_restore_irq( mode ); 113 if( thread_can_yield() ) sched_yield( );113 if( thread_can_yield() ) sched_yield("waiting spinlock"); 114 114 hal_disable_irq( &mode ); 115 115 continue; -
trunk/kernel/mm/mapper.c
r407 r408 240 240 rwlock_wr_unlock( &mapper->lock ); 241 241 242 // deschedule towait load completion242 // wait load completion 243 243 while( 1 ) 244 244 { 245 245 // exit waiting loop when loaded 246 if( page_is_flag( page , PG_INLOAD )) break;246 if( page_is_flag( page , PG_INLOAD ) == false ) break; 247 247 248 248 // deschedule 249 sched_yield( );249 sched_yield("waiting page loading"); 250 250 } 251 251 } … … 253 253 else // page available in mapper 254 254 { 255 256 255 rwlock_rd_unlock( &mapper->lock ); 257 256 } -
trunk/kernel/mm/page.c
r407 r408 47 47 page->index = 0; 48 48 page->refcount = 0; 49 page->fork_nr = 0; 49 50 50 51 spinlock_init( &page->lock ); … … 180 181 // deschedule the calling thread 181 182 thread_block( thread , THREAD_BLOCKED_PAGE ); 182 sched_yield( );183 sched_yield("cannot lock a page"); 183 184 } 184 185 else // page is not locked -
trunk/kernel/mm/page.h
r407 r408 67 67 xlist_entry_t wait_root; /*! root of list of waiting threads (16) */ 68 68 uint32_t refcount; /*! reference counter (4) */ 69 uint32_t reserved; /*! UNUSED(4) */69 uint32_t fork_nr; /*! number of pending forks (4) */ 70 70 spinlock_t lock; /*! only used to set the PG_LOCKED flag (16) */ 71 71 } -
trunk/kernel/mm/vmm.c
r407 r408 32 32 #include <rwlock.h> 33 33 #include <list.h> 34 #include <xlist.h> 34 35 #include <bits.h> 35 36 #include <process.h> … … 69 70 // initialize local list of vsegs 70 71 vmm->vsegs_nr = 0; 71 list_root_init( &vmm->vsegs_root);72 r wlock_init( &vmm->vsegs_lock);72 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); 73 remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) ); 73 74 74 75 assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) … … 154 155 // initialize instrumentation counters 155 156 vmm->pgfault_nr = 0; 156 vmm->u_err_nr = 0;157 vmm->m_err_nr = 0;158 157 159 158 hal_fence(); … … 176 175 177 176 // get lock protecting the vseg list 178 r wlock_rd_lock( &vmm->vsegs_lock);177 remote_rwlock_rd_lock( XPTR( local_cxy , &vmm->vsegs_lock ) ); 179 178 180 179 // scan the list of vsegs 181 list_entry_t * iter; 180 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 181 xptr_t iter_xp; 182 xptr_t vseg_xp; 182 183 vseg_t * vseg; 183 LIST_FOREACH( &vmm->vsegs_root , iter ) 184 { 185 vseg = LIST_ELEMENT( iter , vseg_t , list ); 184 XLIST_FOREACH( root_xp , iter_xp ) 185 { 186 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 187 vseg = (vseg_t *)GET_PTR( vseg_xp ); 188 186 189 printk(" - %s : base = %X / size = %X / npages = %d\n", 187 190 vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size ); … … 206 209 207 210 // release the lock 208 rwlock_rd_unlock( &vmm->vsegs_lock ); 209 } 210 211 ////////////////////////////////////////// 212 error_t vmm_copy( process_t * dst_process, 213 process_t * src_process ) 214 { 215 error_t error; 216 217 vmm_t * src_vmm = &src_process->vmm; 218 vmm_t * dst_vmm = &dst_process->vmm; 219 220 // take the src_vmm vsegs_lock 221 rwlock_wr_lock( &src_vmm->vsegs_lock ); 222 223 // initialize dst_vmm vsegs_lock 224 rwlock_init( &dst_vmm->vsegs_lock ); 225 226 // initialize the dst_vmm vsegs list 227 dst_vmm->vsegs_nr = 0; 228 list_root_init( 
&dst_vmm->vsegs_root ); 229 230 // initialize generic page table 231 error = hal_gpt_create( &dst_vmm->gpt ); 232 211 remote_rwlock_rd_unlock( XPTR( local_cxy , &vmm->vsegs_lock ) ); 212 213 } // vmm_display() 214 215 /////////////////////i//////////////////// 216 void vmm_update_pte( process_t * process, 217 vpn_t vpn, 218 uint32_t attr, 219 ppn_t ppn ) 220 { 221 222 xlist_entry_t * process_root_ptr; 223 xptr_t process_root_xp; 224 xptr_t process_iter_xp; 225 226 xptr_t remote_process_xp; 227 cxy_t remote_process_cxy; 228 process_t * remote_process_ptr; 229 xptr_t remote_gpt_xp; 230 231 pid_t pid; 232 cxy_t owner_cxy; 233 lpid_t owner_lpid; 234 235 // get extended pointer on root of process copies xlist in owner cluster 236 pid = process->pid; 237 owner_cxy = CXY_FROM_PID( pid ); 238 owner_lpid = LPID_FROM_PID( pid ); 239 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 240 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 241 242 // loop on destination process copies 243 XLIST_FOREACH( process_root_xp , process_iter_xp ) 244 { 245 // get cluster and local pointer on remote process 246 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 247 remote_process_ptr = (process_t *)GET_PTR( remote_process_xp ); 248 remote_process_cxy = GET_CXY( remote_process_xp ); 249 250 // get extended pointer on remote gpt 251 remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); 252 253 hal_gpt_update_pte( remote_gpt_xp, 254 vpn, 255 attr, 256 ppn ); 257 } 258 } // end vmm_update_pte() 259 260 /////////////////////////////////////// 261 void vmm_set_cow( process_t * process ) 262 { 263 vmm_t * vmm; 264 265 xlist_entry_t * process_root_ptr; 266 xptr_t process_root_xp; 267 xptr_t process_iter_xp; 268 269 xptr_t remote_process_xp; 270 cxy_t remote_process_cxy; 271 process_t * remote_process_ptr; 272 xptr_t remote_gpt_xp; 273 274 xptr_t vseg_root_xp; 275 xptr_t vseg_iter_xp; 276 277 xptr_t vseg_xp; 278 vseg_t * vseg; 
279 280 pid_t pid; 281 cxy_t owner_cxy; 282 lpid_t owner_lpid; 283 284 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n", 285 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 286 287 // check cluster is reference 288 assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__, 289 "local cluster is not process reference cluster\n"); 290 291 // get pointer on reference VMM 292 vmm = &process->vmm; 293 294 // get extended pointer on root of process copies xlist in owner cluster 295 pid = process->pid; 296 owner_cxy = CXY_FROM_PID( pid ); 297 owner_lpid = LPID_FROM_PID( pid ); 298 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 299 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 300 301 // get extended pointer on root of vsegs xlist from reference VMM 302 vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 303 304 // loop on destination process copies 305 XLIST_FOREACH( process_root_xp , process_iter_xp ) 306 { 307 // get cluster and local pointer on remote process 308 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 309 remote_process_ptr = (process_t *)GET_PTR( remote_process_xp ); 310 remote_process_cxy = GET_CXY( remote_process_xp ); 311 312 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n", 313 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy ); 314 315 // get extended pointer on remote gpt 316 remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); 317 318 // loop on vsegs in (local) reference process VSL 319 XLIST_FOREACH( vseg_root_xp , vseg_iter_xp ) 320 { 321 // get pointer on vseg 322 vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist ); 323 vseg = (vseg_t *)GET_PTR( vseg_xp ); 324 325 assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__, 326 "all vsegs in reference VSL must be local\n" ); 327 328 // get vseg type, base and size 329 uint32_t type = vseg->type; 330 vpn_t 
vpn_base = vseg->vpn_base; 331 vpn_t vpn_size = vseg->vpn_size; 332 333 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n", 334 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size ); 335 336 // set COW flag on the remote GPT depending on vseg type 337 if( (type == VSEG_TYPE_DATA) || 338 (type == VSEG_TYPE_ANON) || 339 (type == VSEG_TYPE_REMOTE) ) 340 { 341 hal_gpt_flip_cow( true, // set_cow 342 remote_gpt_xp, 343 vpn_base, 344 vpn_size ); 345 } 346 } // en loop on vsegs 347 } // end loop on process copies 348 349 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 350 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 351 352 } // end vmm_set-cow() 353 354 ///////////////////////////////////////////////// 355 error_t vmm_fork_copy( process_t * child_process, 356 xptr_t parent_process_xp ) 357 { 358 error_t error; 359 cxy_t parent_cxy; 360 process_t * parent_process; 361 vmm_t * parent_vmm; 362 xptr_t parent_lock_xp; 363 vmm_t * child_vmm; 364 xptr_t iter_xp; 365 xptr_t parent_vseg_xp; 366 vseg_t * parent_vseg; 367 vseg_t * child_vseg; 368 uint32_t type; 369 bool_t cow; 370 vpn_t vpn; 371 vpn_t vpn_base; 372 vpn_t vpn_size; 373 xptr_t page_xp; 374 page_t * page_ptr; 375 cxy_t page_cxy; 376 xptr_t parent_root_xp; 377 bool_t mapped; 378 ppn_t ppn; 379 380 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n", 381 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ); 382 383 // get parent process cluster and local pointer 384 parent_cxy = GET_CXY( parent_process_xp ); 385 parent_process = (process_t *)GET_PTR( parent_process_xp ); 386 387 // get local pointers on parent and child VMM 388 parent_vmm = &parent_process->vmm; 389 child_vmm = &child_process->vmm; 390 391 // get extended pointer on lock protecting the parent VSL 392 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock ); 393 394 // take the lock protecting the parent VSL 395 remote_rwlock_rd_lock( 
parent_lock_xp ); 396 397 // initialize the lock protecting the child VSL 398 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ) ); 399 400 // initialize the child VSL as empty 401 xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) ); 402 child_vmm->vsegs_nr = 0; 403 404 // create & initialize the child GPT as empty 405 error = hal_gpt_create( &child_vmm->gpt ); 233 406 if( error ) 234 407 { 235 printk("\n[ERROR] in %s : cannot initialize page table\n", __FUNCTION__ ); 236 return ENOMEM; 237 } 238 239 // loop on SRC VSL to register vsegs copies in DST VSL 240 // and copy valid PTEs from SRC GPT to DST GPT 241 list_entry_t * iter; 242 vseg_t * src_vseg; 243 vseg_t * dst_vseg; 244 LIST_FOREACH( &src_vmm->vsegs_root , iter ) 245 { 246 // get pointer on current src_vseg 247 src_vseg = LIST_ELEMENT( iter , vseg_t , list ); 248 249 // allocate memory for a new dst_vseg 250 dst_vseg = vseg_alloc(); 251 252 if( dst_vseg == NULL ) 408 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); 409 return -1; 410 } 411 412 // build extended pointer on parent VSL 413 parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root ); 414 415 // loop on parent VSL xlist 416 XLIST_FOREACH( parent_root_xp , iter_xp ) 417 { 418 // get local and extended pointers on current parent vseg 419 parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 420 parent_vseg = (vseg_t *)GET_PTR( parent_vseg_xp ); 421 422 // get vseg type 423 type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) ); 424 425 426 vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n", 427 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type), 428 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) ); 429 430 // all parent vsegs - but STACK - must be copied in child VSL 431 if( type != VSEG_TYPE_STACK ) 253 432 { 254 // release all allocated vsegs 255 LIST_FOREACH( &dst_vmm->vsegs_root , iter ) 433 // allocate memory for a new child vseg 
434 child_vseg = vseg_alloc(); 435 if( child_vseg == NULL ) // release all allocated vsegs 256 436 { 257 dst_vseg = LIST_ELEMENT( iter , vseg_t , list ); 258 vseg_free( dst_vseg ); 437 vmm_destroy( child_process ); 438 printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ ); 439 return -1; 259 440 } 260 return ENOMEM; 261 } 262 263 // copy src_vseg to dst_vseg 264 vseg_init_from_ref( dst_vseg , XPTR( local_cxy , src_vseg ) ); 265 266 // register dst_vseg in DST VSL 267 vseg_attach( dst_vmm , dst_vseg ); 268 269 // copy SRC GPT to DST GPT / set COW for all writable vsegs, but the FILE type 270 bool_t cow = (src_vseg->type != VSEG_TYPE_FILE) && (src_vseg->flags & VSEG_WRITE); 271 error = hal_gpt_copy( &dst_vmm->gpt, 272 &src_vmm->gpt, 273 src_vseg->vpn_base, 274 src_vseg->vpn_size, 275 cow ); 276 if( error ) 277 { 278 printk("\n[ERROR] in %s : cannot copy page GPT\n", __FUNCTION__ ); 279 hal_gpt_destroy( &dst_vmm->gpt ); 280 return ENOMEM; 281 } 282 } 283 284 // release the src_vmm vsegs_lock 285 rwlock_wr_unlock( &src_vmm->vsegs_lock ); 286 287 // initialize STACK allocator 288 dst_vmm->stack_mgr.bitmap = 0; 289 dst_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; 290 291 // initialize MMAP allocator 292 dst_vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; 293 dst_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; 294 dst_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; 441 442 // copy parent vseg to child vseg 443 vseg_init_from_ref( child_vseg , parent_vseg_xp ); 444 445 // register child vseg in child VSL 446 vseg_attach( child_vmm , child_vseg ); 447 448 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n", 449 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type), 450 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) ); 451 452 // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT 453 if( type != VSEG_TYPE_CODE ) 454 { 455 // activate the COW for 
DATA, MMAP, REMOTE vsegs only 456 cow = ( type != VSEG_TYPE_FILE ); 457 458 vpn_base = child_vseg->vpn_base; 459 vpn_size = child_vseg->vpn_size; 460 461 // scan pages in parent vseg 462 for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) 463 { 464 error = hal_gpt_pte_copy( &child_vmm->gpt, 465 XPTR( parent_cxy , &parent_vmm->gpt ), 466 vpn, 467 cow, 468 &ppn, 469 &mapped ); 470 if( error ) 471 { 472 vmm_destroy( child_process ); 473 printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ ); 474 return -1; 475 } 476 477 // increment page descriptor fork_nr for the referenced page if mapped 478 if( mapped ) 479 { 480 page_xp = ppm_ppn2page( ppn ); 481 page_cxy = GET_CXY( page_xp ); 482 page_ptr = (page_t *)GET_PTR( page_xp ); 483 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 484 485 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n", 486 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 487 488 } 489 } 490 } // end if no code & no stack 491 } // end if no stack 492 } // end loop on vsegs 493 494 // release the parent vsegs lock 495 remote_rwlock_rd_unlock( parent_lock_xp ); 496 497 // initialize the child VMM STACK allocator 498 child_vmm->stack_mgr.bitmap = 0; 499 child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; 500 501 // initialize the child VMM MMAP allocator 295 502 uint32_t i; 296 for( i = 0 ; i < 32 ; i++ ) list_root_init( &dst_vmm->mmap_mgr.zombi_list[i] ); 503 child_vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; 504 child_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; 505 child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; 506 for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] ); 297 507 298 508 // initialize instrumentation counters 299 dst_vmm->pgfault_nr = 0; 300 dst_vmm->u_err_nr = 0; 301 dst_vmm->m_err_nr = 0; 302 303 // copy base addresses 304 dst_vmm->kent_vpn_base = src_vmm->kent_vpn_base; 305 dst_vmm->args_vpn_base = 
src_vmm->args_vpn_base; 306 dst_vmm->envs_vpn_base = src_vmm->envs_vpn_base; 307 dst_vmm->heap_vpn_base = src_vmm->heap_vpn_base; 308 dst_vmm->code_vpn_base = src_vmm->code_vpn_base; 309 dst_vmm->data_vpn_base = src_vmm->data_vpn_base; 310 311 dst_vmm->entry_point = src_vmm->entry_point; 509 child_vmm->pgfault_nr = 0; 510 511 // copy base addresses from parent VMM to child VMM 512 child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base)); 513 child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); 514 child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base)); 515 child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base)); 516 child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base)); 517 child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base)); 518 519 child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point)); 312 520 313 521 hal_fence(); … … 315 523 return 0; 316 524 317 } // vmm_ copy()525 } // vmm_fork_copy() 318 526 319 527 /////////////////////////////////////// 320 528 void vmm_destroy( process_t * process ) 321 529 { 530 xptr_t vseg_xp; 322 531 vseg_t * vseg; 323 532 … … 325 534 vmm_t * vmm = &process->vmm; 326 535 536 // get extended pointer on VSL root and VSL lock 537 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 538 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 539 327 540 // get lock protecting vseg list 328 r wlock_wr_lock( &vmm->vsegs_lock);329 330 // remove all vsegs registered in vmm331 while( ! 
list_is_empty( &vmm->vsegs_root) )541 remote_rwlock_wr_lock( lock_xp ); 542 543 // remove all vsegs registered in VSL 544 while( !xlist_is_empty( root_xp ) ) 332 545 { 333 vseg = LIST_FIRST( &vmm->vsegs_root , vseg_t , list ); 546 vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist ); 547 vseg = (vseg_t *)GET_PTR( vseg_xp ); 334 548 vseg_detach( vmm , vseg ); 335 549 vseg_free( vseg ); … … 337 551 338 552 // release lock 339 r wlock_wr_unlock(&vmm->vsegs_lock);553 remote_rwlock_wr_unlock( lock_xp ); 340 554 341 555 // remove all vsegs from zombi_lists in MMAP allocator … … 345 559 while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) ) 346 560 { 347 vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , list );561 vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist ); 348 562 vseg_detach( vmm , vseg ); 349 563 vseg_free( vseg ); … … 362 576 { 363 577 vmm_t * vmm = &process->vmm; 578 579 // scan the VSL 364 580 vseg_t * vseg; 365 list_entry_t * iter; 366 367 // scan the list of registered vsegs 368 LIST_FOREACH( &vmm->vsegs_root , iter ) 581 xptr_t iter_xp; 582 xptr_t vseg_xp; 583 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 584 585 XLIST_FOREACH( root_xp , iter_xp ) 369 586 { 370 vseg = LIST_ELEMENT( iter , vseg_t , list ); 587 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 588 vseg = (vseg_t *)GET_PTR( vseg_xp ); 371 589 372 590 if( ((vpn_base + vpn_size) > vseg->vpn_base) && … … 463 681 { 464 682 // get pointer on zombi vseg from zombi_list 465 vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , list );683 vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist ); 466 684 467 685 // remove vseg from free-list 468 list_unlink( &vseg-> list );686 list_unlink( &vseg->zlist ); 469 687 470 688 // compute base … … 579 797 cxy ); 580 798 581 // attach vseg to vmm 582 rwlock_wr_lock( &vmm->vsegs_lock ); 799 // attach vseg to VSL 800 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 801 remote_rwlock_wr_lock( lock_xp ); 583 
802 vseg_attach( vmm , vseg ); 584 r wlock_wr_unlock( &vmm->vsegs_lock);803 remote_rwlock_wr_unlock( lock_xp ); 585 804 586 805 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n", … … 601 820 uint32_t type = vseg->type; 602 821 603 // detach vseg from VMM 604 rwlock_wr_lock( &vmm->vsegs_lock ); 605 vseg_detach( &process->vmm , vseg ); 606 rwlock_wr_unlock( &vmm->vsegs_lock ); 822 // detach vseg from VSL 823 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 824 remote_rwlock_wr_lock( lock_xp ); 825 vseg_detach( &process->vmm , vseg ); 826 remote_rwlock_wr_unlock( lock_xp ); 607 827 608 828 // release the stack slot to VMM stack allocator if STACK type … … 632 852 // update zombi_list 633 853 spinlock_lock( &mgr->lock ); 634 list_add_first( &mgr->zombi_list[index] , &vseg-> list );854 list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); 635 855 spinlock_unlock( &mgr->lock ); 636 856 } … … 686 906 // set page table entry 687 907 ppn = ppm_page2ppn( XPTR( local_cxy , page ) ); 688 error = hal_gpt_set_pte( gpt , vpn , ppn , attr ); 908 error = hal_gpt_set_pte( gpt, 909 vpn, 910 attr, 911 ppn ); 689 912 if( error ) 690 913 { … … 695 918 696 919 return 0; 697 } 920 921 } // end vmm_map_kernel_vseg() 698 922 699 923 ///////////////////////////////////////// … … 729 953 intptr_t vaddr ) 730 954 { 731 list_entry_t * iter; 732 vseg_t * vseg = NULL; 733 734 // get lock protecting the vseg list 735 rwlock_rd_lock( &vmm->vsegs_lock ); 736 737 // scan the list of vsegs 738 LIST_FOREACH( &vmm->vsegs_root , iter ) 739 { 740 vseg = LIST_ELEMENT( iter , vseg_t , list ); 741 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) break; 742 } 743 744 // release the lock 745 rwlock_rd_unlock( &vmm->vsegs_lock ); 746 747 return vseg; 748 } 955 xptr_t iter_xp; 956 xptr_t vseg_xp; 957 vseg_t * vseg; 958 959 // get extended pointers on VSL lock and root 960 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 961 xptr_t root_xp = XPTR( local_cxy , 
&vmm->vsegs_root ); 962 963 // get lock protecting the VSL 964 remote_rwlock_rd_lock( lock_xp ); 965 966 // scan the list of vsegs in VSL 967 XLIST_FOREACH( root_xp , iter_xp ) 968 { 969 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 970 vseg = (vseg_t *)GET_PTR( vseg_xp ); 971 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) 972 { 973 // return success 974 remote_rwlock_rd_unlock( lock_xp ); 975 return vseg; 976 } 977 } 978 979 // return failure 980 remote_rwlock_rd_unlock( lock_xp ); 981 return NULL; 982 983 } // end vseg_from_vaddr() 749 984 750 985 ///////////////////////////////////////////// … … 769 1004 if( vseg == NULL) return EINVAL; 770 1005 771 // get VMM lock protecting vsegs list 772 rwlock_wr_lock( &vmm->vsegs_lock ); 1006 // get extended pointer on VSL lock 1007 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 1008 1009 // get lock protecting VSL 1010 remote_rwlock_wr_lock( lock_xp ); 773 1011 774 1012 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // region not included in vseg … … 831 1069 832 1070 // release VMM lock 833 r wlock_wr_unlock( &vmm->vsegs_lock);1071 remote_rwlock_wr_unlock( lock_xp ); 834 1072 835 1073 return error; … … 1129 1367 ppn_t new_ppn; // new PTE_PPN 1130 1368 uint32_t new_attr; // new PTE_ATTR 1131 xptr_t page_xp; // extended pointer on allocated page descriptor1132 1369 error_t error; 1133 1370 … … 1137 1374 1138 1375 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n", 1139 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , %d);1376 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow ); 1140 1377 1141 1378 // get VMM pointer … … 1159 1396 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1160 1397 1161 // for both copy_on_write and page_fault events, we allocate a physical page, 1162 // initialize it, register it in the GPT, and return the new_ppn and new_attr 1398 // for both "copy_on_write" and "page_fault" 
events, allocate a physical page, 1399 // initialize it, register it in the reference GPT, update GPT copies in all 1400 // clusters containing a copy, and return the new_ppn and new_attr 1163 1401 1164 1402 if( cow ) ////////////// copy_on_write request /////////// 1165 1403 { 1166 assert( ( *attr & GPT_MAPPED) , __FUNCTION__ ,1167 "PTE must be mapped for a copy-on-write \n" );1168 1169 vmm_dmsg("\n[DBG] %s : core[%x,%d] page %x must be copied => do it\n",1404 assert( (old_attr & GPT_MAPPED) , __FUNCTION__ , 1405 "PTE must be mapped for a copy-on-write exception\n" ); 1406 1407 excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n", 1170 1408 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 1171 1409 1172 // allocate a physical page depending on vseg type 1173 page_xp = vmm_page_allocate( vseg , vpn ); 1174 1175 if( page_xp == XPTR_NULL ) 1410 // get extended pointer, cluster and local pointer on page descriptor 1411 xptr_t page_xp = ppm_ppn2page( old_ppn ); 1412 cxy_t page_cxy = GET_CXY( page_xp ); 1413 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 1414 1415 // get number of pending forks in page descriptor 1416 uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr ) ); 1417 1418 if( count ) // pending fork => allocate a new page, copy it, reset COW 1176 1419 { 1177 printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", 1178 __FUNCTION__ , process->pid , vpn ); 1179 return ENOMEM; 1420 // allocate a new physical page 1421 page_xp = vmm_page_allocate( vseg , vpn ); 1422 if( page_xp == XPTR_NULL ) 1423 { 1424 printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", 1425 __FUNCTION__ , process->pid , vpn ); 1426 return -1; 1427 } 1428 1429 // compute allocated page PPN 1430 new_ppn = ppm_page2ppn( page_xp ); 1431 1432 // copy old page content to new page 1433 xptr_t old_base_xp = ppm_ppn2base( old_ppn ); 1434 xptr_t new_base_xp = ppm_ppn2base( new_ppn ); 1435 memcpy( GET_PTR( new_base_xp ), 1436 GET_PTR( 
old_base_xp ), 1437 CONFIG_PPM_PAGE_SIZE ); 1438 } 1439 else // no pending fork => keep the existing page, reset COW 1440 { 1441 new_ppn = old_ppn; 1180 1442 } 1181 1443 1182 // compute allocated page PPN 1183 new_ppn = ppm_page2ppn( page_xp ); 1184 1185 // copy old page content to new page 1186 xptr_t old_base_xp = ppm_ppn2base( old_ppn ); 1187 xptr_t new_base_xp = ppm_ppn2base( new_ppn ); 1188 memcpy( GET_PTR( new_base_xp ), 1189 GET_PTR( old_base_xp ), 1190 CONFIG_PPM_PAGE_SIZE ); 1191 1192 // update attributes: reset COW and set WRITABLE 1193 new_attr = old_attr & ~GPT_COW; 1194 new_attr = new_attr | GPT_WRITABLE; 1195 1196 // register PTE in GPT 1197 error = hal_gpt_set_pte( &vmm->gpt , vpn , new_ppn , new_attr ); 1198 1199 if( error ) 1444 // build new_attr : reset COW and set WRITABLE, 1445 new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW); 1446 1447 // update GPT[vpn] for all GPT copies 1448 // to maintain coherence of copies 1449 vmm_update_pte( process, 1450 vpn, 1451 new_attr, 1452 new_ppn ); 1453 1454 // decrement fork_nr in page descriptor 1455 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 ); 1456 } 1457 else /////////////// page_fault request /////////// 1458 { 1459 if( (old_attr & GPT_MAPPED) == 0 ) // true page_fault => map it 1200 1460 { 1201 printk("\n[ERROR] in %s : cannot update GPT / process = %x / vpn = %x\n", 1202 __FUNCTION__ , process->pid , vpn ); 1203 return error; 1204 } 1205 } 1206 else //////////////////// page_fault request /////////// 1207 { 1208 if( (old_attr & GPT_MAPPED) == 0 ) // PTE unmapped in ref GPT 1209 { 1210 1211 vmm_dmsg("\n[DBG] %s : core[%x,%d] page %x unmapped => try to map it\n", 1461 1462 excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n", 1212 1463 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 1213 1464 1214 // allocate one physical page, depending on vseg type1465 // allocate new_ppn, depending on vseg type 1215 1466 error = vmm_get_one_ppn( vseg , vpn , 
&new_ppn ); 1216 1217 1467 if( error ) 1218 1468 { 1219 1469 printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", 1220 1470 __FUNCTION__ , process->pid , vpn ); 1221 return error;1471 return -1; 1222 1472 } 1223 1473 1224 // define attributesfrom vseg flags1474 // define new_attr from vseg flags 1225 1475 new_attr = GPT_MAPPED | GPT_SMALL; 1226 1476 if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; … … 1229 1479 if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; 1230 1480 1231 // register PTE in GPT 1232 error = hal_gpt_set_pte( &vmm->gpt , vpn , new_ppn , new_attr ); 1233 1481 // register new PTE in reference GPT 1482 // on demand policy => no update of GPT copies 1483 error = hal_gpt_set_pte( &vmm->gpt, 1484 vpn, 1485 new_attr, 1486 new_ppn ); 1234 1487 if( error ) 1235 1488 { 1236 1489 printk("\n[ERROR] in %s : cannot update GPT / process = %x / vpn = %x\n", 1237 1490 __FUNCTION__ , process->pid , vpn ); 1238 return error;1491 return -1; 1239 1492 } 1240 1493 } 1241 else 1494 else // mapped in reference GPT => get it 1242 1495 { 1496 new_ppn = old_ppn; 1243 1497 new_attr = old_attr; 1244 new_ppn = old_ppn;1245 1498 } 1246 1499 } 1247 1500 1248 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn =%x / ppn = %x / attr = %x\n",1501 excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n", 1249 1502 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr ); 1250 1503 1504 // retur success 1251 1505 *ppn = new_ppn; 1252 1506 *attr = new_attr; … … 1282 1536 1283 1537 // update local GPT 1284 error |= hal_gpt_set_pte( &vmm->gpt , vpn , ppn , attr ); 1538 error |= hal_gpt_set_pte( &vmm->gpt, 1539 vpn, 1540 attr, 1541 ppn ); 1285 1542 } 1286 1543 else // local cluster is the reference cluster … … 1297 1554 } // end vmm_handle_page_fault() 1298 1555 1299 //////////////////////////////////////////// ///1300 error_t vmm_ copy_on_write( process_t * process,1301 1556 
//////////////////////////////////////////// 1557 error_t vmm_handle_cow( process_t * process, 1558 vpn_t vpn ) 1302 1559 { 1303 1560 uint32_t attr; // missing page attributes … … 1324 1581 1325 1582 // update local GPT 1326 error |= hal_gpt_set_pte( &vmm->gpt , vpn , ppn , attr ); 1583 error |= hal_gpt_set_pte( &vmm->gpt, 1584 vpn, 1585 attr, 1586 ppn ); 1327 1587 } 1328 1588 else // local cluster is the reference cluster … … 1337 1597 return error; 1338 1598 1339 } // end vmm_ copy_on_write()1599 } // end vmm_handle_cow() 1340 1600 1341 1601 /////////////////////////////////////////// -
trunk/kernel/mm/vmm.h
r407 r408 89 89 /********************************************************************************************* 90 90 * This structure defines the Virtual Memory Manager for a given process in a given cluster. 91 * This local VMM provides three main services: 92 * 1) It registers all vsegs statically or dynamically defined in the vseg list. 93 * 2) It allocates virtual memory space for the STACKS and MMAP vsegs (FILE/ANON/REMOTE). 94 * 3) It contains the local copy of the generic page table descriptor. 91 * This local VMM provides four main services: 92 * 1) It registers all vsegs in the local copy of the vseg list (VSL). 93 * 2) It contains the local copy of the generic page table (GPT). 94 * 3) The stack manager dynamically allocates virtual memory space for the STACK vsegs. 95 * 4) The mmap manager dynamically allocates virtual memory for the (FILE/ANON/REMOTE) vsegs. 96 ******************************************************a************************************** 97 * Implementation notes: 98 * 1. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by 99 * a remote_rwlock, because it can be accessed by a thread running in a remote cluster. 100 * An exemple is the vmm_fork_copy() function. 101 * 2. In most custers, the VSL and GPT are only partial copies of the reference VSL and GPT 102 * structures, stored in the reference cluster. 95 103 ********************************************************************************************/ 96 104 97 105 typedef struct vmm_s 98 106 { 99 rwlock_t vsegs_lock; /*! lock protecting the vsegs list */ 100 list_entry_t vsegs_root; /*! all vsegs in same process and same cluster */ 101 uint32_t vsegs_nr; /*! total number of local vsegs */ 102 103 gpt_t gpt; /*! embedded generic page table descriptor */ 104 105 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 106 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 107 108 uint32_t pgfault_nr; /*! 
page fault counter (instrumentation) */ 109 uint32_t u_err_nr; /*! TODO ??? [AG] */ 110 uint32_t m_err_nr; /*! TODO ??? [AG] */ 111 112 vpn_t kent_vpn_base; /*! kentry vseg first page */ 113 vpn_t args_vpn_base; /*! args vseg first page */ 114 vpn_t envs_vpn_base; /*! envs zone first page */ 115 vpn_t heap_vpn_base; /*! envs zone first page */ 116 vpn_t code_vpn_base; /*! code zone first page */ 117 vpn_t data_vpn_base; /*! data zone first page */ 118 119 intptr_t entry_point; /*! main thread entry point */ 107 remote_rwlock_t vsegs_lock; /*! lock protecting the vsegs list */ 108 xlist_entry_t vsegs_root; /*! VSL root (VSL only complete in reference) */ 109 uint32_t vsegs_nr; /*! total number of local vsegs */ 110 111 gpt_t gpt; /*! Generic Page Table (complete in reference) */ 112 113 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 114 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 115 116 uint32_t pgfault_nr; /*! page fault counter (instrumentation) */ 117 118 vpn_t kent_vpn_base; /*! kentry vseg first page */ 119 vpn_t args_vpn_base; /*! args vseg first page */ 120 vpn_t envs_vpn_base; /*! envs zone first page */ 121 vpn_t heap_vpn_base; /*! envs zone first page */ 122 vpn_t code_vpn_base; /*! code zone first page */ 123 vpn_t data_vpn_base; /*! data zone first page */ 124 125 intptr_t entry_point; /*! main thread entry point */ 120 126 } 121 127 vmm_t; … … 147 153 148 154 /********************************************************************************************* 149 * This function is called by the sys_fork() system call. 150 * It copies the content of a parent process descriptor VMM to a child process VMM. 151 * - All vsegs registered in the source VSL are copied in the destination VSL. 152 * - All PTEs registered in the source GPT are copied in destination GPT. For all writable 153 * PTEs - but the FILE vsegs - the WRITABLE flag is reset and the COW flag is set in 154 * the destination GPT. 
155 ********************************************************************************************* 156 * @ dst_process : pointer on destination process descriptor. 157 * @ src_process : pointer on source process descriptor. 155 * This function is called by the process_fork_create() function. It partially copies 156 * the content of a remote parent process VMM to the local child process VMM: 157 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child 158 * VSL, and all valid GPT entries in parent GPT are copied to the child GPT. 159 * The WRITABLE flag is reset and the COW flag is set in child GPT. 160 * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the 161 * GPT entries are not copied in the chilf GPT, that will be dynamically updated from 162 * the .elf file when a page fault is reported. 163 * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all 164 * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set. 165 * - no STACK vseg is copied from parent VMM to child VMM, because the child STACK vseg 166 * must be copied from the cluster containing the user thread requesting the fork(). 167 ********************************************************************************************* 168 * @ child_process : local pointer on local child process descriptor. 169 * @ parent_process_xp : extended pointer on remote parent process descriptor. 158 170 * @ return 0 if success / return ENOMEM if failure. 159 171 ********************************************************************************************/ 160 error_t vmm_copy( struct process_s * dst_process, 161 struct process_s * src_process ); 162 163 /********************************************************************************************* 164 * This function removes all vsegs registered in in a virtual memory manager, 165 * and releases the memory allocated to the local generic page table. 
172 error_t vmm_fork_copy( struct process_s * child_process, 173 xptr_t parent_process_xp ); 174 175 /********************************************************************************************* 176 * This function is called by the process_make_fork() function to handle the fork syscall. 177 * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP, 178 * and REMOTE vsegs of a process identified by the <process> argument. 179 * It must be called by a thread running in the reference cluster, that contains the complete 180 * list of vsegs. Use the rpc_vmm_set_cow_client() when the calling thread client is remote. 181 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies, 182 * using the list of copies stored in the owner process, and using remote_write accesses to 183 * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated. 184 ********************************************************************************************* 185 * @ process : local pointer on local reference process descriptor. 186 ********************************************************************************************/ 187 void vmm_set_cow( struct process_s * process ); 188 189 /********************************************************************************************* 190 * This function is called by the vmm_get_pte() function in case of COW exception. 191 * It modifies both the PPN an the attributes for a GPT entry identified by the <process> 192 * and <vpn> arguments. 193 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies, 194 * using the list of copies stored in the owner process, and using remote_write accesses to 195 * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated. 
196 ********************************************************************************************* 197 * @ process : local pointer on local process descriptor. 198 * @ vpn : PTE index. 199 * @ attr : PTE / attributes. 200 * @ ppn : PTE / physical page index. 201 ********************************************************************************************/ 202 void vmm_update_pte( struct process_s * process, 203 vpn_t vpn, 204 uint32_t attr, 205 ppn_t ppn ); 206 207 /********************************************************************************************* 208 * This function removes all vsegs registered in in the virtual memory manager of the 209 * process identified by the <process> argument. 210 * It releases the memory allocated to the local generic page table. 166 211 ********************************************************************************************* 167 212 * @ process : pointer on process descriptor. … … 315 360 * @ returns 0 if success / returns ENOMEM if no memory. 316 361 ********************************************************************************************/ 317 error_t vmm_ copy_on_write( struct process_s * process,318 362 error_t vmm_handle_cow( struct process_s * process, 363 vpn_t vpn ); 319 364 320 365 /********************************************************************************************* -
trunk/kernel/mm/vseg.c
r407 r408 194 194 195 195 // add vseg in vmm list 196 list_add_last( &vmm->vsegs_root , &vseg->list ); 196 xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), 197 XPTR( local_cxy , &vseg->xlist ) ); 197 198 } 198 199 … … 205 206 206 207 // remove vseg from vmm list 207 list_unlink( &vseg->list);208 } 209 208 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 209 } 210 -
trunk/kernel/mm/vseg.h
r407 r408 68 68 /********************************************************************************************** 69 69 * This structure defines a virtual segment descriptor. 70 * - The VSL contains only local vsegs, but is implemented as an xlist, because it can be 71 * accessed by thread running in a remote cluster. 72 * - The zombi list is used by the local MMAP allocator. It is implemented as a local list. 70 73 *********************************************************************************************/ 71 74 72 75 typedef struct vseg_s 73 76 { 74 list_entry_t list; /*! all vsegs in same process / same free list if mmap */ 77 xlist_entry_t xlist; /*! all vsegs in same VSL (or same zombi list) */ 78 list_entry_t zlist; /*! all vsegs in same zombi list */ 75 79 struct vmm_s * vmm; /*! pointer on associated VM manager */ 76 80 uint32_t type; /*! vseg type */ -
trunk/kernel/syscalls/sys_exec.c
r407 r408 150 150 // Implementation note: 151 151 // This function build an exec_info_t structure containing all informations 152 // required to create the new process descriptor and the associated thread. 152 // required to initialize the new process descriptor and the associated thread. 153 // It includes the process PID (unchanged), main() arguments, environment variables, 154 // and the pathname to the new process .elf file. 153 155 // It calls the process_exec_get_strings() functions to copy the main() arguments and 154 156 // the environment variables from user buffers to the exec_info_t structure, allocate 155 157 // and call the process_make_exec() function. 156 // Finally, it destroys the calling thread and process. 158 // As it must destroy all parent process copies, and all parent threads in all clusters, 159 // the process_make_exec() function must be executed in the parent owner cluster, 160 // and this sys_exec() function uses a RPC to access the owner cluster if required. 
161 // 157 162 // TODO : the args & envs arguments are not supported yet : both must be NULL 158 163 ///////////////////////////////////////////////////////////////////////////////////////// … … 169 174 tm_start = hal_get_cycles(); 170 175 171 // get p ointers on parent process and thread176 // get parent process pid 172 177 thread_t * this = CURRENT_THREAD; 173 178 process_t * process = this->process; … … 177 182 __FUNCTION__, local_cxy, this->core->lid, pid, (uint32_t)hal_get_cycles() ); 178 183 179 sched_display( 0 ); 184 // get owner cluster 185 cxy_t owner_cxy = CXY_FROM_PID( pid ); 180 186 181 187 // check pathname length … … 189 195 // copy pathname in exec_info structure (kernel space) 190 196 hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 197 191 198 // check args argument 192 199 assert( (args == NULL) , __FUNCTION__ , … … 196 203 assert( (envs == NULL) , __FUNCTION__ , 197 204 "args not supported yet\n" ); 198 199 // compute client_cxy (local cluster) and server_cxy (target cluster)200 cxy_t cxy_server = CXY_FROM_PID( pid );201 cxy_t cxy_client = local_cxy;202 203 // register parent process in exec_info204 exec_info.parent_xp = process->ref_xp;205 206 // new process keep the parent process PID207 exec_info.keep_pid = true;208 205 209 206 // check and store args in exec_info structure if required … … 229 226 } 230 227 228 // register PID in exec_info 229 exec_info.pid = pid; 230 231 231 // call process_make_exec (local or remote) 232 if( cxy_server == cxy_client)232 if( owner_cxy == local_cxy ) 233 233 { 234 234 error = process_make_exec( &exec_info ); … … 236 236 else 237 237 { 238 rpc_process_exec_client( cxy_server , &exec_info , &error ); 238 rpc_process_make_exec_client( owner_cxy, 239 &exec_info, 240 &error ); 239 241 } 240 242 … … 242 244 { 243 245 printk("\n[ERROR] in %s : cannot create new process %x in cluster %x\n", 244 __FUNCTION__, pid, cxy_server);246 __FUNCTION__, pid, owner_cxy ); 245 247 this->errno = 
error; 246 248 return -1; 247 249 } 248 249 // FIXME delete the local process descriptor250 // process_kill( process );251 250 252 251 tm_end = hal_get_cycles(); -
trunk/kernel/syscalls/sys_fork.c
r407 r408 41 41 int sys_fork() 42 42 { 43 process_t * parent_process; // pointer on parent process descriptor 44 pid_t parent_pid; // parent process identifier 45 thread_t * parent_thread; // pointer on parent thread descriptor 46 process_t * child_process; // pointer on child process descriptor 47 pid_t child_pid; // child process identifier 48 thread_t * child_thread; // pointer on child main thread descriptor 49 cxy_t target_cxy; // target cluster for forked child process 50 error_t error; 43 process_t * parent_process_ptr; // pointer on local parent process descriptor 44 xptr_t parent_thread_xp; // extended pointer on parent thread descriptor 45 pid_t parent_pid; // parent process identifier 46 thread_t * parent_thread_ptr; // local pointer on local parent thread descriptor 51 47 52 uint64_t tm_start; 53 uint64_t tm_end; 48 pid_t child_pid; // child process identifier 49 thread_t * child_thread_ptr; // local pointer on remote child thread descriptor 50 cxy_t target_cxy; // target cluster for forked child process 51 52 xptr_t ref_process_xp; // extended pointer on reference parent process 53 cxy_t ref_process_cxy; // cluster of reference parent process 54 process_t * ref_process_ptr; // local pointer on reference parent process 55 56 error_t error; 57 58 uint64_t tm_start; 59 uint64_t tm_end; 54 60 55 61 tm_start = hal_get_cycles(); 56 62 57 // get pointers on parent process and thread 58 parent_thread = CURRENT_THREAD; 59 parent_process = parent_thread->process; 60 parent_pid = parent_process->pid; 63 // get pointers on local parent process and thread 64 parent_thread_ptr = CURRENT_THREAD; 65 parent_thread_xp = XPTR( local_cxy , parent_thread_ptr ); 66 parent_process_ptr = parent_thread_ptr->process; 67 parent_pid = parent_process_ptr->pid; 61 68 62 fork_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x/ cycle %d\n",63 __FUNCTION__ , local_cxy , parent_thread->core->lid , parent_pid, (uint32_t)tm_start );69 fork_dmsg("\n[DBG] %s : core[%x,%d] parent process 
%x enters / cycle %d\n", 70 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, parent_pid, (uint32_t)tm_start ); 64 71 65 // check parent process children number 66 if( hal_atomic_add( &parent_process->children_nr , 1 ) >= CONFIG_PROCESS_MAX_CHILDREN ) 72 // get infos on reference process 73 ref_process_xp = parent_process_ptr->ref_xp; 74 ref_process_cxy = GET_CXY( ref_process_xp ); 75 ref_process_ptr = (process_t *)GET_PTR( ref_process_xp ); 76 77 // check parent process children number from reference 78 xptr_t children_xp = XPTR( ref_process_cxy , &ref_process_ptr->children_nr ); 79 if( hal_remote_atomic_add( children_xp , 1 ) >= CONFIG_PROCESS_MAX_CHILDREN ) 67 80 { 68 81 printk("\n[ERROR] in %s : too much children processes\n", __FUNCTION__); 69 hal_atomic_add ( &parent_process->children_nr , -1 ); 70 return EAGAIN; 82 hal_remote_atomic_add ( children_xp , -1 ); 83 parent_thread_ptr->errno = EAGAIN; 84 return -1; 71 85 } 72 86 73 // Select target cluster for future migration ofchild process and main thread.87 // Select target cluster for child process and main thread. 74 88 // If placement is not user-defined, the placement is defined by the DQDT. 75 // The two first processes ("init" and "sh") on boot cluster do not migrate. 
76 77 if( parent_thread->fork_user ) 89 if( parent_thread_ptr->fork_user ) // user defined placement 78 90 { 79 // user defined placement 80 target_cxy = parent_thread->fork_cxy; 81 parent_thread->fork_user = false; 91 target_cxy = parent_thread_ptr->fork_cxy; 92 parent_thread_ptr->fork_user = false; 82 93 } 83 else if( (LPID_FROM_PID(parent_process->pid) < 2) && (local_cxy == 0) ) 84 { 85 // 2 first process stay in boot cluster 86 target_cxy = local_cxy; 87 } 88 else 94 else // DQDT placement 89 95 { 90 // DQDT placement91 96 target_cxy = dqdt_get_cluster_for_process(); 92 97 } 93 98 94 //printk("\n[DBG] %s : core[%x,%d] for process %x selects target_cluster = %x\n", 95 //__FUNCTION__ , local_cxy , parent_thread->core->lid , parent_pid , target_cxy ); 96 97 // allocates memory in local cluster for the child process descriptor 98 child_process = process_alloc(); 99 100 if( child_process == NULL ) 101 { 102 printk("\n[ERROR] in %s : cannot allocate child process\n", __FUNCTION__ ); 103 hal_atomic_add ( &parent_process->children_nr , -1 ); 104 return EAGAIN; 105 } 106 107 // get a new PID for child process, 108 if( target_cxy == local_cxy ) // target cluster is local 99 // call process_make_fork in target cluster 100 if( target_cxy == local_cxy ) 109 101 { 110 error = cluster_pid_alloc( XPTR( target_cxy , child_process ) , &child_pid ); 102 error = process_make_fork( ref_process_xp, 103 parent_thread_xp, 104 &child_pid, 105 &child_thread_ptr ); 111 106 } 112 else // target cluster is remote107 else 113 108 { 114 rpc_process_pid_alloc_client( target_cxy , child_process , &error , &child_pid ); 109 rpc_process_make_fork_client( target_cxy, 110 ref_process_xp, 111 parent_thread_xp, 112 &child_pid, 113 &child_thread_ptr, 114 &error ); 115 115 } 116 116 117 117 if( error ) 118 118 { 119 printk("\n[ERROR] in %s : cannot allocate PID\n", __FUNCTION__ ); 120 hal_atomic_add ( &parent_process->children_nr , -1);121 p rocess_destroy( child_process );122 return EAGAIN;119 
printk("\n[ERROR] in %s : cannot fork process %x in cluster %x\n", 120 __FUNCTION__, parent_pid, local_cxy ); 121 parent_thread_ptr->errno = EAGAIN; 122 return -1; 123 123 } 124 124 125 // initialize and register the child process descriptor 126 process_reference_init( child_process , child_pid , XPTR(local_cxy, parent_process) ); 127 128 // initialises child process standard files structures 129 // ( root / cwd / bin ) from parent process descriptor 130 131 vfs_file_count_up( parent_process->vfs_root_xp ); 132 child_process->vfs_root_xp = parent_process->vfs_root_xp; 133 134 vfs_file_count_up( parent_process->vfs_cwd_xp ); 135 child_process->vfs_cwd_xp = parent_process->vfs_cwd_xp; 136 137 vfs_file_count_up( parent_process->vfs_bin_xp ); 138 child_process->vfs_bin_xp = parent_process->vfs_bin_xp; 139 140 // copy the parent process fd_array to the child process fd_array 141 process_fd_remote_copy( XPTR( local_cxy , &child_process->fd_array ), 142 XPTR( local_cxy , &parent_process->fd_array ) ); 143 144 //printk("\n[DBG] %s : core[%x,%d] for process %x created child process %x\n", 145 //__FUNCTION__ , local_cxy , parent_thread->core->lid , parent_pid , child_pid ); 146 147 // replicate VMM 148 error = vmm_copy( child_process , parent_process ); 149 150 if( error ) 151 { 152 printk("\n[ERROR] in %s : cannot duplicate VMM\n", __FUNCTION__ ); 153 hal_atomic_add ( &parent_process->children_nr , -1 ); 154 process_destroy( child_process ); 155 return ENOMEM; 156 } 157 158 //printk("\n[DBG] %s : core[%x,%d] for process %x duplicated vmm in child process\n", 159 //__FUNCTION__ , local_cxy , parent_thread->core->lid , parent_pid ); 160 //vmm_display( parent_process , true ); 161 //vmm_display( child_process , true ); 162 163 // create child main thread in local cluster 164 error = thread_user_fork( child_process, 165 parent_thread->u_stack_size, 166 parent_thread->u_stack_base, 167 &child_thread ); 168 if( error ) 169 { 170 printk("\n[ERROR] in %s : cannot duplicate main 
thread\n", __FUNCTION__ ); 171 hal_atomic_add( &parent_process->children_nr , -1 ); 172 process_destroy( child_process ); 173 return ENOMEM; 174 } 175 176 //printk("\n[DBG] %s : core[%x,%d] initialised child main thread\n", 177 //__FUNCTION__ , local_cxy , parent_thread->core->lid ); 178 179 // update DQDT for the child thread 180 dqdt_local_update_threads( 1 ); 181 182 // set child_thread FPU_context from parent_thread register values 183 // only when the parent process is the FPU owner 184 if( CURRENT_THREAD->core->fpu_owner == parent_thread ) 125 // set remote child FPU_context from parent_thread register values 126 // only when the parent thread is the FPU owner 127 if( CURRENT_THREAD->core->fpu_owner == parent_thread_ptr ) 185 128 { 186 hal_fpu_context_save( child_thread->fpu_context);129 hal_fpu_context_save( XPTR( target_cxy , child_thread_ptr ) ); 187 130 } 188 131 189 // set child_thread CPU context from parent_thread register values 190 hal_do_cpu_save( child_thread->cpu_context, 191 child_thread, 192 (int)((intptr_t)child_thread - (intptr_t)parent_thread) ); 132 // set remote child CPU context from parent_thread register values 133 hal_cpu_context_fork( XPTR( target_cxy , child_thread_ptr ) ); 193 134 194 195 // from this point, both parent and child threads execute the following code 196 // but child execute it only when it has been unblocked by its parent 135 // From this point, both parent and child threads execute the following code. 136 // They can be distinguished by the CURRENT_THREAD value, and child will only 137 // execute it when it is unblocked by parent. 138 // - parent unblock child, and return child PID to user application. 
139 // - child thread does nothing, and return 0 to user pplication 197 140 198 141 thread_t * current = CURRENT_THREAD; 199 142 200 if( current == parent_thread )143 if( current == parent_thread_ptr ) // current == parent thread 201 144 { 202 145 // parent_thread unblock child_thread 203 thread_unblock( XPTR( local_cxy , child_thread) , THREAD_BLOCKED_GLOBAL );146 thread_unblock( XPTR( target_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL ); 204 147 205 148 tm_end = hal_get_cycles(); … … 207 150 fork_dmsg("\n[DBG] %s : core[%x,%d] parent_process %x exit / cycle %d\n" 208 151 " child_process %x / child_thread = %x / cost = %d\n", 209 __FUNCTION__, local_cxy, parent_thread ->core->lid, parent_pid, (uint32_t)tm_start,210 child_pid, child_thread ->trdid , (uint32_t)(tm_end - tm_start) );152 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, parent_pid, (uint32_t)tm_end, 153 child_pid, child_thread_ptr->trdid , (uint32_t)(tm_end - tm_start) ); 211 154 212 155 return child_pid; 213 156 } 214 else // current == child_thread157 else // current == child_thread 215 158 { 216 assert( (current == child_thread) , __FUNCTION__ , 217 "current thread %x is not the child thread %x\n", current , child_thread);159 160 tm_end = hal_get_cycles(); 218 161 219 162 fork_dmsg("\n[DBG] %s : core[%x,%d] child process %x exit / cycle %d\n", 220 __FUNCTION__, local_cxy, parent_thread ->core->lid, child_pid, (uint32_t)hal_get_cycles());163 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, child_pid, (uint32_t)tm_end ); 221 164 222 165 return 0; -
trunk/kernel/syscalls/sys_get_cycle.c
r407 r408 48 48 { 49 49 printk("\n[ERROR] in %s : user buffer unmapped for thread %x in process %x\n", 50 50 __FUNCTION__ , this->trdid , process->pid ); 51 51 this->errno = EFAULT; 52 52 return -1; -
trunk/kernel/syscalls/sys_read.c
r407 r408 61 61 xptr_t file_xp; // remote file extended pointer 62 62 uint32_t nbytes; // number of bytes actually read 63 63 reg_t save_sr; // required to enable IRQs during syscall 64 64 uint32_t tm_start; 65 65 uint32_t tm_end; … … 93 93 return -1; 94 94 } 95 96 // enable IRQs 97 hal_enable_irq( &save_sr ); 95 98 96 99 // get extended pointer on remote file descriptor … … 150 153 return -1; 151 154 } 155 156 // restore IRQs 157 hal_restore_irq( save_sr ); 152 158 153 159 hal_fence(); … … 190 196 #endif 191 197 192 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / nbytes = %d/ cycle %d\n"193 " first byte = %c / file_id = %d / cost = %d\n",194 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , nbytes , tm_start,195 *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start );198 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n" 199 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n", 200 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid , 201 tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start ); 196 202 197 203 return nbytes; -
trunk/kernel/syscalls/sys_signal.c
r407 r408 33 33 void * handler ) 34 34 { 35 thread_t * this = CURRENT_THREAD;35 thread_t * this = CURRENT_THREAD; 36 36 37 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__ ); 38 this->errno = EINVAL; 39 return -1; 40 37 41 if((sig_id == 0) || (sig_id >= SIG_NR) || (sig_id == SIGKILL) || (sig_id == SIGSTOP)) 38 42 { -
trunk/kernel/syscalls/sys_thread_exit.c
r407 r408 73 73 74 74 // deschedule 75 sched_yield( );75 sched_yield("waiting parent join"); 76 76 } 77 77 } -
trunk/kernel/syscalls/sys_thread_join.c
r407 r408 138 138 139 139 // deschedule 140 sched_yield( );140 sched_yield("waiting child exit"); 141 141 } 142 142 } -
trunk/kernel/syscalls/sys_thread_sleep.c
r407 r408 36 36 37 37 thread_block( this , THREAD_BLOCKED_GLOBAL ); 38 sched_yield( );38 sched_yield("blocked on sleep"); 39 39 40 40 thread_dmsg("\n[DBG] %s : thread %x in process %x resume at cycle\n", -
trunk/kernel/syscalls/sys_thread_yield.c
r407 r408 27 27 int sys_thread_yield() 28 28 { 29 sched_yield( );29 sched_yield("user request"); 30 30 return 0; 31 31 } -
trunk/kernel/syscalls/sys_write.c
r407 r408 40 40 { 41 41 error_t error; 42 paddr_t paddr; 43 xptr_t file_xp; 44 uint32_t nbytes; 45 42 paddr_t paddr; // unused, but required for user space checking 43 xptr_t file_xp; // remote file extended pointer 44 uint32_t nbytes; // number of bytes actually written 45 reg_t save_sr; // required to enable IRQs during syscall 46 46 uint32_t tm_start; 47 47 uint32_t tm_end; … … 70 70 return -1; 71 71 } 72 73 // enable IRQs 74 hal_enable_irq( &save_sr ); 72 75 73 76 // get extended pointer on remote file descriptor … … 128 131 } 129 132 133 // restore IRQs 134 hal_restore_irq( save_sr ); 135 130 136 hal_fence(); 131 137 132 138 tm_end = hal_get_cycles(); 133 139 134 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / nbytes = %d/ cycle %d\n"135 " first byte = %c / file_id = %d / cost = %d\n",136 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , nbytes , tm_start,137 *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start );140 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n" 141 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n", 142 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid , 143 tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start ); 138 144 139 145 return nbytes; -
trunk/kernel/syscalls/syscalls.h
r407 r408 41 41 * terminating thread. 42 42 ****************************************************************************************** 43 43 * @ exit_value : pointer to be returned to parent thread if thread is attached. 44 44 * @ return 0 if success / return -1 if failure. 45 45 *****************************************************************************************/ … 154 154 155 155 /****************************************************************************************** 156 * [10] This slot not allocated yet 157 ****************************************************************************************** 158 * @ return 0 if success / returns -1 if failure. 159 ***************************************************************************************** 160 161 /****************************************************************************************** 162 * [11] This function rmove an existing mapping defined by the <addr> and <size> 156 * [10] This function implements the exit system call terminating a POSIX process. 157 ****************************************************************************************** 158 * @ status : termination status (not used in present implementation). 159 *****************************************************************************************/ 160 void sys_exit( uint32_t status ); 161 162 /****************************************************************************************** 163 * [11] This function remove an existing mapping defined by the <addr> and <size> 163 164 * arguments in user space. 164 165 ****************************************************************************************** … 194 195 * [14] This function read bytes from an open file identified by its file descriptor. 195 196 * The file can be a regular file or character oriented device. 197 * IRQs are enabled during this system call. 
196 198 ****************************************************************************************** 197 199 * @ file_id : open file index in fd_array. … … 207 209 * [15] This function writes bytes to an open file identified by its file descriptor. 208 210 * The file can be a regular file or character oriented device. 211 * IRQs are enabled during this system call. 209 212 ****************************************************************************************** 210 213 * @ file_id : open file index in fd_array.
Note: See TracChangeset
for help on using the changeset viewer.