- Timestamp: Apr 10, 2019, 10:09:39 AM (6 years ago)
- Location: trunk
- Files: 73 edited

Legend:
- Unmodified
- Added
- Removed
trunk/Makefile
(r623 → r625)
 # Default values for hardware parameters.
 # These parameters should be defined in the 'params-hard.mk' file.
-ARCH
-X_SIZE
-Y_SIZE
-NB_PROCS
-NB_TTYS
-IOC_TYPE
+ARCH      ?= /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
+X_SIZE    ?= 2
+Y_SIZE    ?= 2
+NB_PROCS  ?= 2
+NB_TTYS   ?= 3
+IOC_TYPE  ?= IOC_BDV
 TXT_TYPE  ?= TXT_TTY
 FBF_TYPE  ?= FBF_SCL
…
 	rm -f $(DISK_IMAGE)
 	./create_dmg create $(basename $(DISK_IMAGE))
 	dd if=$(DISK_IMAGE) of=temp.dmg count=65536
 	mv temp.dmg $(DISK_IMAGE)
 	mmd -o -i $(DISK_IMAGE) ::/bin || true
 	mmd -o -i $(DISK_IMAGE) ::/bin/kernel || true
 	mmd -o -i $(DISK_IMAGE) ::/bin/user || true
 	mmd -o -i $(DISK_IMAGE) ::/home || true
+	mcopy -o -i $(DISK_IMAGE) Makefile ::/home || true
 	mdir -/ -b -i $(DISK_IMAGE) ::/

 ##############################################################
trunk/hal/generic/hal_context.h
(r457 → r625)
  * hal_context.h - Generic Thread Context Access API definition.
  *
- * Author  Alain Greiner (2016)
+ * Author  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
 // and hal_fpu_context_t, defined in hal_context.c file, that are accessed with generic
 // void* pointers stored in the thread descriptor.
-// - the "hal_context_t" struct is used for the CPU registers values at context switch.
-// - the "hal_fpu_context_t" struct is used for the FPU registers when required.
+// - the "hal_cpu_context_t" struct saves the CPU registers values at context switch.
+// - the "hal_fpu_context_t" struct saves the FPU registers values at FPU switch.
 //////////////////////////////////////////////////////////////////////////////////////////
…
 /****************************************************************************************
- * This function is used to implement the fork() system call.
- * 1) It saves in a remote (child) thread CPU context the current CPU registers values.
- *    Three slots are not simple copies of the parent registers values :
- *    - the thread pointer is set to the child thread local pointer.
- *    - the stack pointer is set to parent SP + (child_base - parent_base).
- *    - the status register is set to kernel mode with IRQ disabled.
- * 2) It copies the content of the calling (parent) thread kernel_stack,
- *    to the remote (child) thread kernel_stack.
+ * This function is called by the sys_fork() function to complete the fork mechanism.
+ * It is called by the local parent thread to initialize the CPU context of the remote
+ * child thread, identified by the <thread_xp> argument.
+ * It makes three actions:
+ * 1) It copies the current values of the CPU registers of the core running the parent
+ *    thread to the remote child CPU context.
+ * 2) It patches four slots of this remote child CPU context:
+ *    - the c0_th slot is set to the child thread descriptor pointer.
+ *    - the sp_29 slot is set to the child kernel stack pointer.
+ *    - the c0_sr slot is set to kernel mode with IRQ disabled.
+ *    - the c2_ptpr slot is set to the child process GPT value.
+ * 3) It copies the content of the parent thread kernel_stack to the child thread
+ *    kernel_stack, because the COW mechanism is not available on architectures where
+ *    the data MMU is deactivated in kernel mode.
 ****************************************************************************************
- * @ thread_xp : extended pointer on the remote thread descriptor.
+ * @ thread_xp : extended pointer on the child thread descriptor.
 ***************************************************************************************/
 void hal_cpu_context_fork( xptr_t thread_xp );
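A caller-side view of this API, as a hedged sketch (illustrative only, not the actual sys_fork() code; child_cxy and child_ptr are hypothetical names for the cluster and local pointer of the new thread):

    // parent side: complete the fork once the child thread has been created
    xptr_t child_xp = XPTR( child_cxy , child_ptr );  // extended pointer on child thread
    hal_cpu_context_fork( child_xp );                 // copy registers, patch the four
                                                      // slots, and copy the kernel stack

The child thread never calls this function itself: when it is later scheduled, it simply resumes with the patched context.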
trunk/hal/generic/hal_gpt.h
(r624 → r625)
 /****************************************************************************************
- * This function is used to implement the "fork" system call: It copies one GPT entry
- * identified by the <vpn> argument, from a remote <src_gpt_xp> to a local <dst_gpt>.
+ * This function is used to implement the "fork" system call: It copies a remote
+ * source PTE, identified by the <src_gpt_xp> and <src_vpn> arguments, to a local
+ * destination PTE, identified by the <dst_gpt> and <dst_vpn> arguments.
  * It does nothing if the source PTE is not MAPPED and SMALL.
  * It optionally activates the "Copy on Write" mechanism: when the <cow> argument is
  * true: the GPT_WRITABLE flag is reset, and the GPT_COW flag is set.
- * A new second level PT2(s) is allocated for the destination GPT if required.
+ * A new second level PT2 is allocated for the destination GPT if required.
  * It returns in the <ppn> and <mapped> arguments the PPN value for the copied PTE,
  * and a boolean indicating if the PTE is mapped and small, and was actually copied.
 ****************************************************************************************
- * @ dst_gpt    : [in]  local pointer on the local destination GPT.
- * @ src_gpt_xp : [in]  extended pointer on the remote source GPT.
- * @ vpn_base   : [in]  vpn defining the PTE to be copied.
+ * @ dst_gpt    : [in]  local pointer on local destination GPT.
+ * @ dst_vpn    : [in]  vpn defining the PTE in the destination GPT.
+ * @ src_gpt_xp : [in]  extended pointer on remote source GPT.
+ * @ src_vpn    : [in]  vpn defining the PTE in the source GPT.
  * @ cow        : [in]  activate the COPY-On-Write mechanism if true.
  * @ ppn        : [out] PPN value (only if mapped is true).
…
 ***************************************************************************************/
 error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
+                          vpn_t    dst_vpn,
                           xptr_t   src_gpt_xp,
-                          vpn_t    vpn,
+                          vpn_t    src_vpn,
                           bool_t   cow,
                           ppn_t  * ppn,
…
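A hedged usage sketch of the new prototype (not taken from the sources; child_gpt, parent_gpt_xp and vseg are illustrative names), copying all PTEs of one vseg with COW activated, as done for a private data vseg during fork:

    vpn_t   vpn;
    ppn_t   ppn;
    bool_t  mapped;
    error_t error;

    for( vpn = vseg->vpn_base ; vpn < (vseg->vpn_base + vseg->vpn_size) ; vpn++ )
    {
        // same vpn used on both sides here, but src_vpn and dst_vpn can now differ
        error = hal_gpt_pte_copy( child_gpt,       // dst_gpt
                                  vpn,             // dst_vpn
                                  parent_gpt_xp,   // src_gpt_xp
                                  vpn,             // src_vpn
                                  true,            // cow
                                  &ppn,
                                  &mapped );
        if( error ) return error;
    }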
trunk/hal/generic/hal_special.h
(r624 → r625)
  * This function returns the current value of stack pointer from core register.
 ****************************************************************************************/
-uint32_t hal_get_sp( void );
-
-/*****************************************************************************************
- * This function returns the current value of the return address from core register.
- ****************************************************************************************/
-uint32_t hal_get_ra( void );
-
-/*****************************************************************************************
- * This function registers a new value in the core stack pointer and returns previous one.
- ****************************************************************************************/
-inline uint32_t hal_set_sp( void * new_val );
+reg_t hal_get_sp( void );

 /*****************************************************************************************
  * This function returns the faulty address in case of address exception.
  ****************************************************************************************/
-uint32_t hal_get_bad_vaddr( void );
+reg_t hal_get_bad_vaddr( void );
…
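A short usage illustration for the retyped accessor (hedged sketch; the printk format follows the kernel convention used elsewhere in this changeset):

    // in an MMU exception handler: report the faulty virtual address
    reg_t bad_va = hal_get_bad_vaddr();
    printk("\n[ERROR] in %s : badvaddr %x\n", __FUNCTION__, (uint32_t)bad_va );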
trunk/hal/generic/hal_vmm.h
(r623 → r625)
 error_t hal_vmm_kernel_update( struct process_s * process );

+/****************************************************************************************
+ * Depending on the hardware architecture, this function displays the current state
+ * of the VMM of the process identified by the <process> argument.
+ * It displays all valid GPT entries when the <mapping> argument is true.
+ ****************************************************************************************
+ * @ process : local pointer on user process descriptor.
+ * @ return 0 if success / return ENOMEM if failure.
+ ***************************************************************************************/
+void hal_vmm_display( struct process_s * process,
+                      bool_t             mapping );
+
 #endif	/* HAL_VMM_H_ */
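A minimal usage sketch of this new debug function, called from kernel code in the cluster owning the process copy:

    // dump the VSL and all valid GPT entries of the calling process
    hal_vmm_display( CURRENT_THREAD->process , true );

    // dump only the VSL of the kernel process_zero
    hal_vmm_display( &process_zero , false );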
trunk/hal/tsar_mips32/core/hal_context.c
(r570 → r625)
 {
     context->a0_04   = (uint32_t)thread->entry_args;
-    context->sp_29   = (uint32_t)thread->u_stack_base + (uint32_t)thread->u_stack_size - 8;
+    context->sp_29   = (uint32_t)thread->user_stack_vseg->max - 8;
     context->ra_31   = (uint32_t)&hal_kentry_eret;
     context->c0_epc  = (uint32_t)thread->entry_func;
…
 void hal_cpu_context_fork( xptr_t child_xp )
 {
-    // allocate a local CPU context in kernel stack
-    // It is initialized from local parent context
-    // and from child specific values, and is copied in
-    // in the remote child context using a remote_memcpy()
+    // get pointer on calling thread
+    thread_t * this = CURRENT_THREAD;
+
+    // allocate a local CPU context in parent kernel stack
     hal_cpu_context_t  context;

-    // get local parent thread local pointer
+    // get local parent thread cluster and local pointer
+    cxy_t      parent_cxy = local_cxy;
     thread_t * parent_ptr = CURRENT_THREAD;
…
     thread_t * child_ptr = GET_PTR( child_xp );

-    // get remote child cpu_context local pointer
+    // get local pointer on remote child cpu context
     char * child_context_ptr = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );

     // get local pointer on remote child process
-    process_t * process = (process_t *)hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
+    process_t * process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );

     // get ppn of remote child process page table
     uint32_t pt_ppn = hal_remote_l32( XPTR(child_cxy , &process->vmm.gpt.ppn) );

-    // save CPU registers in local CPU context
+    // get local pointer on parent uzone from parent thread descriptor
+    uint32_t * parent_uzone = parent_ptr->uzone_current;
+
+    // compute local pointer on child uzone
+    uint32_t * child_uzone  = (uint32_t *)( (intptr_t)parent_uzone +
+                                            (intptr_t)child_ptr   -
+                                            (intptr_t)parent_ptr  );
+
+    // update the uzone pointer in child thread descriptor
+    hal_remote_spt( XPTR( child_cxy , &child_ptr->uzone_current ) , child_uzone );
+
+#if DEBUG_HAL_CONTEXT
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_HAL_CONTEXT < cycle )
+printk("\n[%s] thread[%x,%x] parent_uzone %x / child_uzone %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, parent_uzone, child_uzone, cycle );
+#endif
+
+    // copy parent kernel stack to child thread descriptor
+    // (this includes the uzone, that is allocated in the kernel stack)
+    char * parent_ksp = (char *)hal_get_sp();
+    char * child_ksp  = (char *)((intptr_t)parent_ksp +
+                                 (intptr_t)child_ptr  -
+                                 (intptr_t)parent_ptr );
+
+    uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;
+
+    hal_remote_memcpy( XPTR( child_cxy , child_ksp ),
+                       XPTR( local_cxy , parent_ksp ),
+                       size );
+
+#if DEBUG_HAL_CONTEXT
+cycle = (uint32_t)hal_get_cycles();
+printk("\n[%s] thread[%x,%x] copied kstack from parent %x to child %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, parent_ptr, child_ptr, cycle );
+#endif
+
+    // patch the user stack pointer slot in the child uzone[UZ_SP],
+    // because parent and child use the same offset to access the user stack,
+    // but parent and child do not have the same user stack base address.
+    uint32_t parent_us_base = parent_ptr->user_stack_vseg->min;
+    vseg_t * child_us_vseg  = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
+    uint32_t child_us_base  = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
+    uint32_t parent_usp     = parent_uzone[UZ_SP];
+    uint32_t child_usp      = parent_usp + child_us_base - parent_us_base;
+
+    hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP] ) , child_usp );
+
+#if DEBUG_HAL_CONTEXT
+cycle = (uint32_t)hal_get_cycles();
+printk("\n[%s] thread[%x,%x] parent_usp %x / child_usp %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, parent_usp, child_usp, cycle );
+#endif
+
+    // save current values of CPU registers to local CPU context
     hal_do_cpu_save( &context );

-    // From this point, both parent and child threads execute the following code.
-    // They can be distinguished by the CURRENT_THREAD value, and child will only
-    // execute it when it is unblocked by parent, after return to sys_fork().
-    // - parent thread copies user stack, and patch sp_29 / c0_th / C0_sr / c2_ptpr
-    // - child thread does nothing
-
-    thread_t * current = CURRENT_THREAD;
-
-    if( current == parent_ptr )     // current == parent thread
+    // From this point, both parent and child can execute the following code,
+    // but child thread will only execute it after being unblocked by parent thread.
+    // They can be distinguished by the (CURRENT_THREAD,local_cxy) values,
+    // and we must re-initialise the calling thread pointer from c0_th register
+
+    this = CURRENT_THREAD;
+
+    if( (this == parent_ptr) && (local_cxy == parent_cxy) )    // parent thread
     {
-        // get parent and child stack pointers
-        char * parent_sp = (char *)context.sp_29;
-        char * child_sp  = (char *)((intptr_t)parent_sp +
-                                    (intptr_t)child_ptr -
-                                    (intptr_t)parent_ptr );
-
-        // patch kernel_stack pointer, current thread, and status slots
-        context.sp_29   = (uint32_t)child_sp;
+        // patch 4 slots in the local CPU context: the sp_29 / c0_th / C0_sr / c2_ptpr
+        // slots are not identical in parent and child
+        context.sp_29   = context.sp_29 + (intptr_t)child_ptr - (intptr_t)parent_ptr;
         context.c0_th   = (uint32_t)child_ptr;
         context.c0_sr   = SR_SYS_MODE;
         context.c2_ptpr = pt_ppn >> 1;

-        // copy local context to remote child context)
+        // copy this patched context to remote child context
         hal_remote_memcpy( XPTR( child_cxy , child_context_ptr ),
                            XPTR( local_cxy , &context ) ,
                            sizeof( hal_cpu_context_t ) );

-        // copy kernel stack content from local parent thread to remote child thread
-        uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_sp;
-        hal_remote_memcpy( XPTR( child_cxy , child_sp ),
-                           XPTR( local_cxy , parent_sp ),
-                           size );
+#if DEBUG_HAL_CONTEXT
+cycle = (uint32_t)hal_get_cycles();
+printk("\n[%s] thread[%x,%x] copied CPU context to child / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
+#endif
+
+        // parent thread unblock child thread
+        thread_unblock( XPTR( child_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );
+
+#if DEBUG_HAL_CONTEXT
+cycle = (uint32_t)hal_get_cycles();
+printk("\n[%s] thread[%x,%x] unblocked child thread / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
+#endif
     }
-    else                            // current == child thread
-    {
-        assert( (current == child_ptr) , "current = %x / child = %x\n");
-    }

 }  // end hal_cpu_context_fork()
…
 void hal_cpu_context_destroy( thread_t * thread )
 {
-    kmem_req_t req;
-
-    req.type = KMEM_CPU_CTX;
-    req.ptr  = thread->cpu_context;
-    kmem_free( &req );
+    kmem_req_t req;
+
+    hal_cpu_context_t * ctx = thread->cpu_context;
+
+    // release CPU context if required
+    if( ctx != NULL )
+    {
+        req.type = KMEM_CPU_CTX;
+        req.ptr  = ctx;
+        kmem_free( &req );
+    }

 }  // end hal_cpu_context_destroy()
…
     kmem_req_t req;

-    req.type = KMEM_FPU_CTX;
-    req.ptr  = thread->fpu_context;
-    kmem_free( &req );
+    hal_fpu_context_t * context = thread->fpu_context;
+
+    // release FPU context if required
+    if( context != NULL )
+    {
+        req.type = KMEM_FPU_CTX;
+        req.ptr  = context;
+        kmem_free( &req );
+    }

 }  // end hal_fpu_context_destroy()
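The fork code above relies on one invariant worth making explicit: parent and child thread descriptors (which contain the kernel stacks and the uzones) have identical size and alignment, so any address inside the parent structure can be translated to the corresponding child address by adding a constant offset. A self-contained illustration of this translation (fork_translate is a hypothetical helper name, not part of the sources):

    #include <stdint.h>

    // translate an address inside the parent thread descriptor to the
    // corresponding address inside the child thread descriptor
    static inline void * fork_translate( void * parent_addr,
                                         void * parent_desc,
                                         void * child_desc )
    {
        return (void *)( (intptr_t)parent_addr +
                         (intptr_t)child_desc  -
                         (intptr_t)parent_desc );
    }

This is exactly the computation applied above to the uzone pointer, the kernel stack pointer, and the sp_29 slot.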
trunk/hal/tsar_mips32/core/hal_exception.c
(r619 → r625)
 if( CURRENT_THREAD->type != THREAD_USER )
 {
-    printk("\n[KERNEL PANIC] in %s : illegal thread type %s\n",
+    printk("\n[PANIC] in %s : illegal thread type %s\n",
     __FUNCTION__, thread_type_str(CURRENT_THREAD->type) );
…
 else if( error == EXCP_USER_ERROR )          // illegal vaddr
 {
-    printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 else                                         // error == EXCP_KERNEL_PANIC
 {
-    printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 case MMU_READ_PRIVILEGE_VIOLATION:           // illegal
 {
-    printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 else if( error == EXCP_USER_ERROR )          // illegal write access
 {
-    printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 else                                         // error == EXCP_KERNEL_PANIC
 {
-    printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 case MMU_READ_EXEC_VIOLATION:                // user error
 {
-    printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 default:                                     // this is a kernel error
 {
-    printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
+    printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     "  %s : epc %x / badvaddr %x / is_ins %d\n",
     __FUNCTION__, this->process->pid, this->trdid, local_cxy,
…
 //////////////////////////////////////////////////////////////////////////////////////////
 // @ this  : pointer on faulty thread descriptor.
-// @ error : EXCP_USER_ERROR or EXCP_KERNEL_PANIC
-//////////////////////////////////////////////////////////////////////////////////////////
-static void hal_exception_dump( thread_t * this,
-                                error_t    error )
+//////////////////////////////////////////////////////////////////////////////////////////
+static void hal_exception_dump( thread_t * this )
 {
     core_t * core = this->core;
…
     remote_busylock_acquire( lock_xp );

-    if( error == EXCP_USER_ERROR )
-    {
-        nolock_printk("\n=== USER ERROR / thread(%x,%x) / core[%d] / cycle %d ===\n",
-        process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
-    }
-    else
-    {
-        nolock_printk("\n=== KERNEL PANIC / thread(%x,%x) / core[%d] / cycle %d ===\n",
-        process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
-    }
+    nolock_printk("\n=== thread(%x,%x) / core[%d] / cycle %d ===\n",
+    process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );

     nolock_printk("busylocks = %d / blocked_vector = %X / flags = %X\n\n",
…
 if( error == EXCP_USER_ERROR )          // user error => kill user process
 {
-    hal_exception_dump( this , error );
+    hal_exception_dump( this );

     sys_exit( EXIT_FAILURE );
…
 else if( error == EXCP_KERNEL_PANIC )   // kernel error => kernel panic
 {
-    hal_exception_dump( this , error );
+    hal_exception_dump( this );

     hal_core_sleep();
trunk/hal/tsar_mips32/core/hal_gpt.c
(r624 → r625)
 ///////////////////////////////////////////
 error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
+                          vpn_t    dst_vpn,
                           xptr_t   src_gpt_xp,
-                          vpn_t    vpn,
+                          vpn_t    src_vpn,
                           bool_t   cow,
                           ppn_t  * ppn,
                           bool_t * mapped )
 {
-    uint32_t     ix1;       // index in PT1
-    uint32_t     ix2;       // index in PT2
+    uint32_t     src_ix1;   // index in SRC PT1
+    uint32_t     src_ix2;   // index in SRC PT2
+
+    uint32_t     dst_ix1;   // index in DST PT1
+    uint32_t     dst_ix2;   // index in DST PT2

     cxy_t        src_cxy;   // SRC GPT cluster
…
 thread_t * this  = CURRENT_THREAD;
 if( DEBUG_HAL_GPT_COPY < cycle )
-printk("\n[%s] : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
+printk("\n[%s] : thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, src_cxy, local_cxy, cycle );
 #endif
…
     assert( (dst_pt1 != NULL) , "dst_pt1 does not exist\n");

-    ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
-    ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+    // compute SRC indexes
+    src_ix1 = TSAR_MMU_IX1_FROM_VPN( src_vpn );
+    src_ix2 = TSAR_MMU_IX2_FROM_VPN( src_vpn );
+
+    // compute DST indexes
+    dst_ix1 = TSAR_MMU_IX1_FROM_VPN( dst_vpn );
+    dst_ix2 = TSAR_MMU_IX2_FROM_VPN( dst_vpn );

     // get src_pte1
-    src_pte1 = hal_remote_l32( XPTR( src_cxy , &src_pt1[ix1] ) );
+    src_pte1 = hal_remote_l32( XPTR( src_cxy , &src_pt1[src_ix1] ) );

     // do nothing if src_pte1 not MAPPED or not SMALL
…
     {
         // get dst_pt1 entry
-        dst_pte1 = dst_pt1[ix1];
+        dst_pte1 = dst_pt1[dst_ix1];

         // map dst_pte1 if required
…

         // register it in DST_GPT
-        dst_pt1[ix1] = dst_pte1;
+        dst_pt1[dst_ix1] = dst_pte1;
     }
…

     // get attr and ppn from SRC_PT2
-    src_pte2_attr = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * ix2] ) );
-    src_pte2_ppn  = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * ix2 + 1] ) );
+    src_pte2_attr = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * src_ix2] ) );
+    src_pte2_ppn  = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * src_ix2 + 1] ) );

     // do nothing if src_pte2 not MAPPED
…
     {
         // set PPN in DST PTE2
-        dst_pt2[2 * ix2 + 1] = src_pte2_ppn;
+        dst_pt2[2 * dst_ix2 + 1] = src_pte2_ppn;

         // set attributes in DST PTE2
         if( cow && (src_pte2_attr & TSAR_MMU_WRITABLE) )
         {
-            dst_pt2[2 * ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
+            dst_pt2[2 * dst_ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
         }
         else
         {
-            dst_pt2[2 * ix2] = src_pte2_attr;
+            dst_pt2[2 * dst_ix2] = src_pte2_attr;
         }
…
 cycle = (uint32_t)hal_get_cycles;
 if( DEBUG_HAL_GPT_COPY < cycle )
-printk("\n[%s] : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
+printk("\n[%s] : thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, src_vpn, dst_vpn, cycle );
 #endif
…
 cycle = (uint32_t)hal_get_cycles;
 if( DEBUG_HAL_GPT_COPY < cycle )
-printk("\n[%s] : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
+printk("\n[%s] : thread[%x,%x] exit / nothing done / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
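For reference, the two-level decomposition performed by these macros on TSAR (a hedged sketch: the exact macro definitions live in the TSAR-specific headers; the widths below are inferred from the 512-entry PT2 and from the vpn += 512 stride used elsewhere in this changeset for big pages):

    // a 20-bit VPN (32-bit address space, 4-Kbyte pages) splits into:
    //  - ix1 : 11 bits, index in the first-level PT1
    //  - ix2 :  9 bits, index in the second-level PT2 (512 entries)
    uint32_t ix1 = vpn >> 9;      // TSAR_MMU_IX1_FROM_VPN( vpn )
    uint32_t ix2 = vpn & 0x1FF;   // TSAR_MMU_IX2_FROM_VPN( vpn )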
trunk/hal/tsar_mips32/core/hal_kentry.S
(r438 → r625)
  * Authors   Ghassan Almaless (2007,2008,2009,2010,2011,2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2017)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
 #------------------------------------------------------------------------------------
 # Kernel Entry point for Interrupt / Exception / Syscall
-# The c2_dext and c2_iext CP2 registers must have been previously set
-# to "local_cxy", because the kernel runs with MMU deactivated.
+# The c2_dext CP2 register must have been previously set
+# to "local_cxy", because the kernel runs with data MMU deactivated.
 #------------------------------------------------------------------------------------
…
     andi   $26,   $26,   0x10        # test User Mode bit
     beq    $26,   $0,    kernel_mode # jump if core already in kernel
-    ori    $27,   $0,    0x3         # $27 <= code MMU OFF
+    ori    $27,   $0,    0xB         # $27 <= code data MMU OFF

 #------------------------------------------------------------------------------------
…
 # to handle a syscall, an interrupt, or an user exception.
 # - save current c2_mode in $26.
-# - set MMU OFF.
+# - set data MMU OFF.
 # - copy user stack pointer in $27 to be saved in uzone.
-# - set kernel stack pointer in $29 == top_kernel_stack(this).
+# - set kernel stack pointer in $29 (kernel stack empty at first entry).

 user_mode:

     mfc2   $26,   $1                 # $26 <= c2_mode
-    mtc2   $27,   $1                 # set MMU OFF
+    mtc2   $27,   $1                 # set data MMU OFF
     move   $27,   $29                # $27 <= user stack pointer
     mfc0   $29,   $4,    2           # get pointer on thread descriptor from c0_th
…
 # after a syscall, to handle an interrupt, or to handle a non-fatal exception.
 # - save current c2_mode in $26.
-# - set MMU OFF.
+# - set data MMU OFF.
 # - copy current kernel stack pointer in $27.

 kernel_mode:

     mfc2   $26,   $1                 # $26 <= c2_mode
-    mtc2   $27,   $1                 # set MMU OFF
+    mtc2   $27,   $1                 # set data MMU OFF
     move   $27,   $29                # $27 <= current kernel stack pointer
…
 # This code is executed in both modes (user or kernel):
 # The assumptions are:
-# - c2_mode contains the MMU OFF value.
+# - c2_mode contains the data MMU OFF value.
 # - $26 contains the previous c2_mode value.
 # - $27 contains the previous sp value (can be usp or ksp).
…
 # We execute the following actions:
 # - decrement $29 to allocate an uzone in kernel stack
-# - save relevant GPR, CP0 and CP2 registers to uzone.
-# - set the SR in kernel mode: IRQ disabled, clear exl.
+# - save GPR, CP0 and CP2 registers to uzone.
+# - set the SR in kernel mode: IRQ disabled, clear EXL.

 unified_mode:
…
     sw     $26,   (UZ_MODE*4)($29)   # save previous c2_mode (can be user or kernel)

-    mfc0   $3,    $12
+    mfc0   $3,    $12                # $3 <= c0_sr
     srl    $3,    $3,    5
     sll    $3,    $3,    5           # reset 5 LSB bits
…
     nop
     move   $4,    $2
+    jal    putd
+    nop
+    la     $4,    msg_crlf
+    jal    puts
+    nop
+# display saved CR value
+    la     $4,    msg_cr
+    jal    puts
+    nop
+    lw     $4,    (UZ_CR*4)($29)
     jal    putx
     nop
…
 #------------------------------------------------------------------------------------
-# This code handle the uzone pointers stack, and calls the relevant
+# This code handle the two-slots uzone pointers stack, and calls the relevant
 # Interrupt / Exception / Syscall handler, depending on XCODE in CP0_CR.
 # Both the hal_do_syscall() and the hal_do_exception() functions use
…
 # - All registers saved in the uzone are restored, using the pointer on uzone,
 #   that is contained in $29.
-# - The "uzone" field in thread descriptor, that has been modified at kernel entry
-#   is restored from value contained in the uzone[UZ_SP] slot.
+# - The "current_uzone" pointer in thread descriptor, that has been modified at
+#   kernel entry is restored from value contained in the uzone[UZ_SP] slot.
 # -----------------------------------------------------------------------------------
…
     nop
     move   $4,    $2
+    jal    putd
+    nop
+    la     $4,    msg_crlf
+    jal    puts
+    nop
+# display saved CR value
+    la     $4,    msg_cr
+    jal    puts
+    nop
+    lw     $4,    (UZ_CR*4)($29)
     jal    putx
     nop
…
     lw     $26,   (UZ_MODE*4)($27)
-    mtc2   $26,   $1                 # restore CP2_MODE from uzone
+    mtc2   $26,   $1                 # restore c2_mode from uzone

 # -----------------------------------------------------------------------------------
…
 .section .kdata

+msg_cr:
+    .align 2
+    .asciiz "- UZ_CR   = "
 msg_sp:
     .align 2
trunk/hal/tsar_mips32/core/hal_kentry.h
(r481 → r625)
 /*
- * hal_kentry.h - MIPS32 registers mnemonics
+ * hal_kentry.h - uzone definition
  *
- * Copyright (c) 2008,2009,2010,2011,2012 Ghassan Almaless
- * Copyright (c) 2011,2012 UPMC Sorbonne Universites
+ * Author  Alain Greiner (2016,2017,2018,2019)
+ *
+ * Copyright (c) UPMC Sorbonne Universites
  *
- * This file is part of ALMOS-kernel.
+ * This file is part of ALMOS-MKH.
  *
- * ALMOS-kernel is free software; you can redistribute it and/or modify it
+ * ALMOS-MKH is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2.0 of the License.
  *
- * ALMOS-kernel is distributed in the hope that it will be useful, but
+ * ALMOS-MKH is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
…
 // a fixed size array of 32 bits integers, used by the kentry function to save/restore
 // the MIPS32 CPU registers, at each exception / interruption / syscall.
-// It also defines several initial values for the SR register.
 //
 // This file is included in the hal_kentry.S, hal_syscall.c, hal_exception.c,
…
 /****************************************************************************************
- * This structure defines the cpu_uzone dynamically allocated in the kernel stack
- * by the hal_kentry assembly code for the TSAR_MIPS32 architecture.
+ * This structure defines the "uzone" dynamically allocated in the kernel stack
+ * by the hal_kentry assembly code to save the MIPS32 registers each time a core
+ * enters the kernel to handle an interrupt, exception, or syscall.
+ * These defines are specific for the TSAR_MIPS32 architecture.
+ *
  * WARNING : It is replicated in hal_kentry.S file.
 ***************************************************************************************/
…
 * The hal_kentry_enter() function is the unique kernel entry point in case of
 * exception, interrupt, or syscall for the TSAR_MIPS32 architecture.
- * It can be executed by a core in user mode (in case of exception or syscall),
- * or by a core already in kernel mode (in case of interrupt).
- *
+ * It can be executed by a core in user mode or by a core already in kernel mode
+ * (in case of interrupt or non fatal exception).
 * In both cases it allocates an "uzone" space in the kernel stack to save the
 * CPU registers values, deactivates the MMU, and calls the relevant handler
…
 *
 * After handler execution, it restores the CPU context from the uzone and jumps
- * to address contained in EPC calling hal_kentry_eret()
+ * to address contained in EPC calling the hal_kentry_eret() function.
 ************************************************************************************/
 void hal_kentry_enter( void );
…
 /*************************************************************************************
 * The hal_kentry_eret() function contains only the assembly "eret" instruction,
- * that resets the EXL bit in the c0_sr register, and jump to the address
+ * that resets the EXL bit in the c0_sr register, and jumps to the address
 * contained in the c0_epc register.
 * ************************************************************************************/
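A short C-side sketch of how kernel code reads one slot of the uzone described above, following the pattern used by hal_syscall.c and hal_context.c in this changeset (UZ_SP is the slot holding the user stack pointer saved at kernel entry):

    thread_t * this  = CURRENT_THREAD;
    uint32_t * uzone = (uint32_t *)this->uzone_current;  // current uzone in kernel stack
    uint32_t   usp   = uzone[UZ_SP];                     // saved user stack pointer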
trunk/hal/tsar_mips32/core/hal_remote.c
(r610 → r625)
     uint32_t scxy = (uint32_t)GET_CXY( src );

-/*
-if( local_cxy == 1 )
-printk("\n@@@ %s : scxy %x / sptr %x / dcxy %x / dptr %x\n",
-__FUNCTION__, scxy, sptr, dcxy, dptr );
-*/
     hal_disable_irq( &save_sr );
trunk/hal/tsar_mips32/core/hal_special.c
(r624 → r625)
 extern cxy_t local_cxy;
 extern void  hal_kentry_enter( void );
+
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function registers the address of the
+// hal_kentry_enter() function in the MIPS32 cp0_ebase register.
+////////////////////////////////////////////////////////////////////////////////
+void hal_set_kentry( void )
+{
+    uint32_t kentry = (uint32_t)(&hal_kentry_enter);
+
+    asm volatile("mtc0   %0,   $15,   1" : : "r" (kentry) );
+}

 /////////////////////////////////////////////////////////////////////////////////
…
 void hal_mmu_init( gpt_t * gpt )
 {
-
-    // set PT1 base address in mmu_ptpr register
+    // set PT1 base address in cp2_ptpr register
     uint32_t ptpr = (((uint32_t)gpt->ptr) >> 13) | (local_cxy << 19);
     asm volatile ( "mtc2   %0,   $0   \n" : : "r" (ptpr) );

-    // set ITLB | ICACHE | DCACHE bits in mmu_mode register
+    // set ITLB | ICACHE | DCACHE bits in cp2_mode register
     asm volatile ( "ori    $26,  $0,  0xB   \n"
                    "mtc2   $26,  $1         \n" );
…

-////////////////////////////////////////////////////////////////////////////////
-// For the TSAR architecture, this function registers the address of the
-// hal_kentry_enter() function in the MIPS32 cp0_ebase register.
-////////////////////////////////////////////////////////////////////////////////
-void hal_set_kentry( void )
-{
-    uint32_t kentry = (uint32_t)(&hal_kentry_enter);
-
-    asm volatile("mtc0   %0,   $15,   1" : : "r" (kentry) );
-}
-
-////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function returns the current value
+// of the 32 bits c0_sr register
+////////////////////////////////////////////////////////////////////////////////
+inline reg_t hal_get_sr( void )
+{
+    reg_t sr;
+
+    asm volatile ("mfc0   %0,   $12" : "=&r" (sr));
+
+    return sr;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function returns the 10 LSB bits
+// of the 32 bits c0_ebase register : Y (4 bits) | Y (4 bits) | LID (2 bits)
+////////////////////////////////////////////////////////////////////////////////
 inline gid_t hal_get_gid( void )
 {
…
 }

-///////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function returns the current value
+// of the 32 bits c0_count cycle counter.
+////////////////////////////////////////////////////////////////////////////////
 inline reg_t hal_time_stamp( void )
 {
…

     return count;
 }
-
-///////////////////////////////
-inline reg_t hal_get_sr( void )
-{
-    reg_t sr;
-
-    asm volatile ("mfc0   %0,   $12" : "=&r" (sr));
-
-    return sr;
-}
…
 }

-///////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function returns the current value
+// of the 32 bits c0_th register.
+////////////////////////////////////////////////////////////////////////////////
 inline struct thread_s * hal_get_current_thread( void )
 {
…
 }

-///////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function sets a new value
+// to the 32 bits c0_th register.
+////////////////////////////////////////////////////////////////////////////////
 void hal_set_current_thread( struct thread_s * thread )
 {
…
 }

-///////////////////////////
-uint32_t hal_get_sp( void )
+////////////////////////////////////////////////////////////////////////////////
+// For the TSAR architecture, this function returns the current value
+// of the 32 bits sp_29 register.
+////////////////////////////////////////////////////////////////////////////////
+reg_t hal_get_sp( void )
 {
     register uint32_t sp;
…

     return sp;
 }
-
-/////////////////////////////////////
-uint32_t hal_set_sp( void * new_val )
-{
-    register uint32_t sp;
-
-    asm volatile
-    ( "or    %0,   $0,   $29   \n"
-      "or    $29,  $0,   %1    \n"
-      : "=&r" (sp) : "r" (new_val) );
-
-    return sp;
-}
-
-///////////////////////////
-uint32_t hal_get_ra( void )
-{
-    register uint32_t ra;
-
-    asm volatile ("or    %0,   $0,   $31" : "=&r" (ra));
-
-    return ra;
-}
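A hedged usage sketch for the cycle-counter accessor documented above (do_work() is a placeholder name; c0_count is 32 bits, so the difference wraps modulo 2^32):

    reg_t t0 = hal_time_stamp();
    do_work();
    reg_t t1 = hal_time_stamp();
    printk("\n[DBG] elapsed cycles = %d\n", (uint32_t)(t1 - t0) );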
trunk/hal/tsar_mips32/core/hal_switch.S
(r457 → r625)
  * hal_switch.S - CPU context switch function for TSAR-MIPS32
  *
- * Author  Alain Greiner (2016)
+ * Author  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/hal/tsar_mips32/core/hal_syscall.c
(r481 → r625)
 #include <hal_kentry.h>

-/////////////////////
+///////////////////////////
 void hal_do_syscall( void )
 {
…
     service_num );

-    // get pointer on exit_thread uzone, because
-    // exit_thread can be different from enter_thread
+    // get pointer on exit_thread uzone, because exit thread
+    // can be different from enter_thread for a fork syscall
     this = CURRENT_THREAD;
     exit_uzone = (uint32_t *)this->uzone_current;
trunk/hal/tsar_mips32/core/hal_uspace.c
(r610 → r625)
 ".set noreorder            \n"
 "move  $13,  %1            \n"   /* $13 <= str                   */
-"mfc2  $15,  $1            \n"   /* $15 <= DTLB and ITLB off     */
+"mfc2  $15,  $1            \n"   /* $15 <= MMU_MODE (DTLB off)   */
 "ori   $14,  $15,  0x4     \n"   /* $14 <= mode DTLB on          */
 "1:                        \n"
 "mtc2  $14,  $1            \n"   /* set DTLB on                  */
-"lb    $12,  0($13)        \n"   /* read char from user space    */
+"lb    $12,  0($13)        \n"   /* $12 <= one byte from u_space */
 "mtc2  $15,  $1            \n"   /* set DTLB off                 */
 "addi  $13,  $13,  1       \n"   /* increment address            */
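The assembly above implements a pattern that can be paraphrased in C as follows (illustration only: load_user_byte() stands for the lb executed with the data TLB on, and does not exist as a real function; the real code must keep the TLB toggling and the load inside one asm sequence):

    // copy bytes from user space into kernel space, switching the data TLB
    // on only for the duration of each user-space load
    char c;
    do
    {
        c        = load_user_byte( str++ );  // mtc2 DTLB on / lb / mtc2 DTLB off
        *(dst++) = c;                        // kernel-space store, DTLB off
    }
    while( c != 0 );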
trunk/hal/tsar_mips32/core/hal_vmm.c
(r624 → r625)
 // This function is called by the process_zero_init() function during kernel_init.
 // It initializes the VMM of the kernel process_zero (containing all kernel threads)
-// in the local cluster: it registers one "kcode" vseg in kernel VSL, and registers
-// one big page in slot[0] of kernel GPT.
+// in the local cluster: For TSAR, it registers one "kcode" vseg in kernel VSL,
+// and registers one big page in slot[0] of kernel GPT.
 //////////////////////////////////////////////////////////////////////////////////////////
 error_t hal_vmm_kernel_init( boot_info_t * info )
…
     return 0;

-}  // end hal_kernel_vmm_init()
-
-//////////////////////////////////////////////////////////////////////////////////////////
-// This function is called by the vmm_init() function to update the VMM of an user
-// process identified by the <process> argument.
-// It registers in the user VSL the "kcode" vseg, registered in the local kernel VSL,
-// and registers in the user GPT the big page[0] mapped in the local kernel GPT.
+}  // end hal_vmm_kernel_init()
+
+//////////////////////////////////////////////////////////////////////////////////////////
+// This function registers in the VMM of an user process identified by the <process>
+// argument all required kernel vsegs.
+// For TSAR, it registers in the user VSL the "kcode" vseg, from the local kernel VSL,
+// and registers in the user GPT the big page[0] from the local kernel GPT.
 //////////////////////////////////////////////////////////////////////////////////////////
 error_t hal_vmm_kernel_update( process_t * process )
 {
     error_t  error;
     uint32_t attr;
     uint32_t ppn;

+    // get cluster identifier
+    cxy_t cxy = local_cxy;
+
 #if DEBUG_HAL_VMM
 thread_t * this = CURRENT_THREAD;
 printk("\n[%s] thread[%x,%x] enter in cluster %x \n",
-__FUNCTION__, this->process->pid, this->trdid, local_cxy );
+__FUNCTION__, this->process->pid, this->trdid, cxy );
+hal_vmm_display( &process_zero , true );
 hal_vmm_display( process , true );
-hal_vmm_display( &process_zero , true );
 #endif

-    // get cluster identifier
-    cxy_t cxy = local_cxy;
-
-    // get extended pointer on kernel GPT
+    // get extended pointer on local kernel GPT
     xptr_t k_gpt_xp = XPTR( cxy , &process_zero.vmm.gpt );
…
                       bool_t             mapping )
 {
+    // get pointer on process VMM
     vmm_t * vmm = &process->vmm;
-    gpt_t * gpt = &vmm->gpt;

     // get pointers on TXT0 chdev
…
     chdev_t * txt0_ptr = GET_PTR( txt0_xp );

-    // get extended pointer on remote TXT0 lock
-    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
-
-    // get locks protecting the VSL and the GPT
-    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
-    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );
-
-    // get TXT0 lock
-    remote_busylock_acquire( lock_xp );
-
-    nolock_printk("\n***** VSL and GPT for process %x in cluster %x\n",
-    process->pid , local_cxy );
-
-    // scan the list of vsegs
-    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
-    xptr_t   iter_xp;
-    xptr_t   vseg_xp;
-    vseg_t * vseg;
-    XLIST_FOREACH( root_xp , iter_xp )
-    {
-        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-        vseg    = GET_PTR( vseg_xp );
-
-        nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
-        vseg_type_str( vseg->type ), vseg->min, vseg->max - vseg->min, vseg->vpn_size );
-
-        if( mapping )
-        {
-            vpn_t    vpn     = vseg->vpn_base;
-            vpn_t    vpn_max = vpn + vseg->vpn_size;
-            ppn_t    ppn;
-            uint32_t attr;
-
-            while( vpn < vpn_max )
-            {
-                hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
-
-                if( attr & GPT_MAPPED )
-                {
-                    if( attr & GPT_SMALL )
-                    {
-                        nolock_printk("    . SMALL : vpn = %X / attr = %X / ppn = %X\n",
-                        vpn , attr , ppn );
-                        vpn++;
-                    }
-                    else
-                    {
-                        nolock_printk("    . BIG   : vpn = %X / attr = %X / ppn = %X\n",
-                        vpn , attr , ppn );
-                        vpn += 512;
-                    }
-                }
-                else
-                {
-                    vpn++;
-                }
-            }
-        }
-    }
-
-    // release TXT0 lock
-    remote_busylock_release( lock_xp );
-
-    // release the VSL and GPT locks
-    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
-    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );
+    // build extended pointers on TXT0 lock, GPT lock and VSL lock
+    xptr_t txt_lock_xp = XPTR( txt0_cxy  , &txt0_ptr->wait_lock );
+    xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+    xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
+
+    // get root of vsegs list
+    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+
+    // get the locks protecting TXT0, VSL, and GPT
+    remote_rwlock_rd_acquire( vsl_lock_xp );
+    remote_rwlock_rd_acquire( gpt_lock_xp );
+    remote_busylock_acquire( txt_lock_xp );
+
+    nolock_printk("\n***** VSL and GPT for process %x in cluster %x / PT1 = %x\n",
+    process->pid , local_cxy , vmm->gpt.ptr );
+
+    if( xlist_is_empty( root_xp ) )
+    {
+        nolock_printk("   ... no vsegs registered\n");
+    }
+    else    // scan the list of vsegs
+    {
+        xptr_t   iter_xp;
+        xptr_t   vseg_xp;
+        vseg_t * vseg;
+
+        XLIST_FOREACH( root_xp , iter_xp )
+        {
+            vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+            vseg    = GET_PTR( vseg_xp );
+
+            nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
+            vseg_type_str(vseg->type), vseg->min, vseg->max - vseg->min, vseg->vpn_size );
+
+            if( mapping )
+            {
+                vpn_t    vpn     = vseg->vpn_base;
+                vpn_t    vpn_max = vpn + vseg->vpn_size;
+                ppn_t    ppn;
+                uint32_t attr;
+
+                while( vpn < vpn_max )   // scan the PTEs
+                {
+                    hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
+
+                    if( attr & GPT_MAPPED )
+                    {
+                        if( attr & GPT_SMALL )
+                        {
+                            nolock_printk("    . SMALL : vpn = %X / attr = %X / ppn = %X\n",
+                            vpn , attr , ppn );
+                            vpn++;
+                        }
+                        else
+                        {
+                            nolock_printk("    . BIG   : vpn = %X / attr = %X / ppn = %X\n",
+                            vpn , attr , ppn );
+                            vpn += 512;
+                        }
+                    }
+                    else
+                    {
+                        vpn++;
+                    }
+                }
+            }
+        }
+    }
+
+    // release locks
+    remote_busylock_release( txt_lock_xp );
+    remote_rwlock_rd_release( gpt_lock_xp );
+    remote_rwlock_rd_release( vsl_lock_xp );

 }  // hal_vmm_display()
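The rewritten display function also tightens the lock discipline: the two rwlocks protecting the VSL and the GPT are taken before the TXT0 busylock, and all three are released in reverse order. As a minimal sketch of the pattern:

    remote_rwlock_rd_acquire( vsl_lock_xp );   // 1. VSL rwlock
    remote_rwlock_rd_acquire( gpt_lock_xp );   // 2. GPT rwlock
    remote_busylock_acquire( txt_lock_xp );    // 3. TXT0 busylock last

    // ... nolock_printk() output ...

    remote_busylock_release( txt_lock_xp );    // release in reverse order
    remote_rwlock_rd_release( gpt_lock_xp );
    remote_rwlock_rd_release( vsl_lock_xp );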
trunk/hal/tsar_mips32/drivers/soclib_tty.c
(r619 → r625)
     owner_pid = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->pid ) );

-    // block TXT owner process only if it is not the INIT process
-    if( owner_pid != 1 )
-    {
-        // get parent process descriptor pointers
-        parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
-        parent_cxy = GET_CXY( parent_xp );
-        parent_ptr = GET_PTR( parent_xp );
-
-        // get pointers on the parent process main thread
-        parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
-        parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
-
-        // transfer TXT ownership
-        process_txt_transfer_ownership( owner_xp );
-
-        // block all threads in all clusters, but the main thread
-        process_sigaction( owner_pid , BLOCK_ALL_THREADS );
-
-        // block the main thread
-        xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
-        thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
-
-        // atomically update owner process termination state
-        hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
-                              PROCESS_TERM_STOP );
-
-        // unblock the parent process main thread
-        thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
-
-        return;
-    }
+    // TXT owner cannot be the INIT process
+    assert( (owner_pid != 1) , "INIT process cannot be the TXT owner" );
+
+    // get parent process descriptor pointers
+    parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
+    parent_cxy = GET_CXY( parent_xp );
+    parent_ptr = GET_PTR( parent_xp );
+
+    // get pointers on the parent process main thread
+    parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
+    parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
+
+    // transfer TXT ownership
+    process_txt_transfer_ownership( owner_xp );
+
+    // mark for block all threads in all clusters, but the main
+    process_sigaction( owner_pid , BLOCK_ALL_THREADS );
+
+    // block the main thread
+    xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
+    thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
+
+    // atomically update owner process termination state
+    hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
+                          PROCESS_TERM_STOP );
+
+    // unblock the parent process main thread
+    thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
+
+    return;
 }
…
     owner_xp = process_txt_get_owner( channel );

-    // check process exist
-    assert( (owner_xp != XPTR_NULL) ,
-    "TXT owner process not found\n" );
+    // check process exist
+    assert( (owner_xp != XPTR_NULL) , "TXT owner process not found\n" );

     // get relevant infos on TXT owner process
…
     owner_pid = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->pid ) );

-    // kill TXT owner process only if it is not the INIT process
-    if( owner_pid != 1 )
-    {
-        // get parent process descriptor pointers
-        parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
-        parent_cxy = GET_CXY( parent_xp );
-        parent_ptr = GET_PTR( parent_xp );
-
-        // get pointers on the parent process main thread
-        parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
-        parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
-
-        // remove process from TXT list
-        process_txt_detach( owner_xp );
-
-        // mark for delete all thread in all clusters, but the main
-        process_sigaction( owner_pid , DELETE_ALL_THREADS );
-
-        // block main thread
-        xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
-        thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
-
-        // atomically update owner process termination state
-        hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
-                              PROCESS_TERM_KILL );
-
-        // unblock the parent process main thread
-        thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
-
-        return;
-    }
+    // TXT owner cannot be the INIT process
+    assert( (owner_pid != 1) , "INIT process cannot be the TXT owner" );
+
+#if DEBUG_HAL_TXT_RX
+if( DEBUG_HAL_TXT_RX < rx_cycle )
+printk("\n[%s] TXT%d owner is process %x\n",
+__FUNCTION__, channel, owner_pid );
+#endif
+
+    // get parent process descriptor pointers
+    parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
+    parent_cxy = GET_CXY( parent_xp );
+    parent_ptr = GET_PTR( parent_xp );
+
+    // get pointers on the parent process main thread
+    parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
+    parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
+
+    // transfer TXT ownership
+    process_txt_transfer_ownership( owner_xp );
+
+    // remove process from TXT list
+    // process_txt_detach( owner_xp );
+
+    // mark for delete all thread in all clusters, but the main
+    process_sigaction( owner_pid , DELETE_ALL_THREADS );
+
+#if DEBUG_HAL_TXT_RX
+if( DEBUG_HAL_TXT_RX < rx_cycle )
+printk("\n[%s] marked for delete all threads of process but main\n",
+__FUNCTION__, owner_pid );
+#endif
+
+    // block main thread
+    xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
+    thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
+
+#if DEBUG_HAL_TXT_RX
+if( DEBUG_HAL_TXT_RX < rx_cycle )
+printk("\n[%s] blocked process %x main thread\n",
+__FUNCTION__, owner_pid );
+#endif
+
+    // atomically update owner process termination state
+    hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
+                          PROCESS_TERM_KILL );
+
+    // unblock the parent process main thread
+    thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
+
+#if DEBUG_HAL_TXT_RX
+if( DEBUG_HAL_TXT_RX < rx_cycle )
+printk("\n[%s] unblocked parent process %x main thread\n",
+__FUNCTION__, hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid) ) );
+#endif
+
+    return;
 }
trunk/kernel/fs/fatfs.c
(r623 → r625)
  * fatfs.c - FATFS file system API implementation.
  *
- * Author  Alain Greiner (2016,2017,2018)
+ * Author  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
 //////////////////////////////////////////////////////////////////////////////////////////
-// This static function - atomically - decrements "free_clusters", and updates
-// the "free_cluster_hint" shared variables in the FATFS context in the FAT cluster.
-// It scans the FAT to find the first free slot larger than the <cluster> argument,
-// and sets "free_cluster_hint" <= (free - 1).
+// This static function decrements the "free_clusters" variable, and updates the
+// "free_cluster_hint" variable in the FATFS context, identified by the <fat_ctx_cxy>
+// and <fat_ctx_ptr> arguments (cluster containing the FAT mapper).
+// It scans all slots in the FAT mapper seen as an array of 32 bits words, looking for
+// the first free slot larger than the <cluster> argument, to update "free_cluster_hint".
 //
 // WARNING : The free_lock protecting exclusive access to these variables
 //           must be taken by the calling function.
 //////////////////////////////////////////////////////////////////////////////////////////
-// @ cluster      : recently allocated cluster index in FAT.
-//////////////////////////////////////////////////////////////////////////////////////////
-static error_t fatfs_free_clusters_decrement( uint32_t cluster )
-{
-    fatfs_ctx_t * loc_ctx;     // local pointer on local FATFS context
-    fatfs_ctx_t * fat_ctx;     // local pointer on FATFS in cluster containing FAT mapper
-    cxy_t         mapper_cxy;  // cluster identifier for cluster containing FAT mapper
+// @ fat_ctx_cxy : FAT mapper cluster identifier.
+// @ fat_ctx_ptr : local pointer on FATFS context.
+// @ cluster     : recently allocated cluster index in FAT.
+//////////////////////////////////////////////////////////////////////////////////////////
+static error_t fatfs_free_clusters_decrement( cxy_t         fat_ctx_cxy,
+                                              fatfs_ctx_t * fat_ctx_ptr,
+                                              uint32_t      cluster )
+{
     xptr_t        mapper_xp;   // extended pointer on FAT mapper
     xptr_t        hint_xp;     // extended pointer on "free_cluster_hint" shared variable
…
     uint32_t      page_max;    // max number of pages in FAT mapper
     xptr_t        page_xp;     // extended pointer on current page in FAT mapper
+    xptr_t        base_xp;     // extended pointer on current page base
     xptr_t        slot_xp;     // extended pointer on current slot in FAT mapper
…
 #endif

-    // get local pointer on local FATFS context
-    loc_ctx = fs_context[FS_TYPE_FATFS].extend;
-
-    // get cluster containing FAT mapper
-    mapper_xp  = loc_ctx->fat_mapper_xp;
-    mapper_cxy = GET_CXY( mapper_xp );
-
-    // get local pointer on FATFS context in FAT cluster
-    fat_ctx = hal_remote_lpt( XPTR( mapper_cxy , &fs_context[FS_TYPE_FATFS].extend ) );
-
     // build extended pointers on free_clusters, and free_cluster_hint
-    hint_xp = XPTR( mapper_cxy , &fat_ctx->free_cluster_hint );
-    numb_xp = XPTR( mapper_cxy , &fat_ctx->free_clusters );
+    hint_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_cluster_hint );
+    numb_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_clusters );

     // update "free_clusters"
…
     hal_remote_s32( numb_xp , numb - 1 );

-    // scan FAT mapper to find the first free slot > cluster
-    // and update "free_cluster_hint" as (free - 1)
+    // get extended pointer on FAT mapper
+    mapper_xp = hal_remote_l64( XPTR( fat_ctx_cxy , &fat_ctx_ptr->fat_mapper_xp ) );
+
+    // initialise variables to scan the FAT mapper
+    // and find the first free slot > cluster
     page_id  = (cluster + 1) >> 10;
     slot_id  = (cluster + 1) & 0x3FF;
-    page_max = (loc_ctx->fat_sectors_count >> 3);
+    page_max = hal_remote_l32( XPTR( fat_ctx_cxy, &fat_ctx_ptr->fat_sectors_count ) ) >> 3;

     // scan FAT mapper / loop on pages
…
         }

+        // get extended pointer on page
+        base_xp = ppm_page2base( page_xp );
+
         // scan FAT mapper / loop on slots
         while ( slot_id < 1024 )
         {
             // get extended pointer on current slot
-            slot_xp = ppm_page2base( page_xp ) + (slot_id << 2);
-
-            // test FAT slot value
+            slot_xp = base_xp + (slot_id << 2);
+
+            // test slot value
             if ( hal_remote_l32( slot_xp ) == FREE_CLUSTER )
             {
-                // update "free_cluster_hint" <= (free - 1)
+                // update "free_cluster_hint"
                 hal_remote_s32( hint_xp , (page_id << 10) + slot_id - 1 );
…
             }

-            // increment slot_id
-            slot_id++;
+            // update slot_id
+            slot_id = 0;

         }  // end loop on slots

-        // update loop variables
+        // update (page_id,slot_id) variables
         page_id++;
         slot_id = 0;
…
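The scan above treats the FAT mapper as a flat array of 32-bit slots, 1024 slots per 4-Kbyte page; this is where the magic numbers come from:

    // locate FAT slot (cluster + 1) in the FAT mapper
    page_id = (cluster + 1) >> 10;     // 1024 four-byte slots per page
    slot_id = (cluster + 1) & 0x3FF;   // slot index inside the page
    // byte offset of the slot in the page : slot_id << 2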
484 // This static function increments the "free_clusters" variable, and updates the 485 // "free_cluster_hint" variables in the FATFS context, identified by the <fat_ctx_cxy> 486 // and <fat_ctx_ptr> argument (cluster containing the FAT mapper). 487 // If the released cluster index is smaller than the current (hint) value, 488 // it set "free_cluster_hint" <= cluster. 489 489 // 490 490 // WARNING : The free_lock protecting exclusive access to these variables 491 491 // must be taken by the calling function. 492 492 ////////////////////////////////////////////////////////////////////////////////////////// 493 // @ fat_ctx_cxy : FAT mapper cluster identifier. 494 // @ fat_ctx_ptr : local pointer on FATFS context. 493 495 // @ cluster : recently released cluster index in FAT. 494 496 ////////////////////////////////////////////////////////////////////////////////////////// 495 static void fatfs_free_clusters_increment( uint32_t cluster ) 496 { 497 fatfs_ctx_t * loc_ctx; // local pointer on local FATFS context 498 fatfs_ctx_t * fat_ctx; // local pointer on FATFS in cluster containing FAT mapper 499 cxy_t fat_cxy; // cluster identifier for cluster containing FAT mapper 497 static void fatfs_free_clusters_increment( cxy_t fat_ctx_cxy, 498 fatfs_ctx_t * fat_ctx_ptr, 499 uint32_t cluster ) 500 { 500 501 xptr_t hint_xp; // extended pointer on "free_cluster_hint" shared variable 501 502 xptr_t numb_xp; // extended pointer on "free_clusters" shared variable … … 503 504 uint32_t numb; // "free_clusters" variable current value 504 505 505 // get local pointer on local FATFS context 506 loc_ctx = fs_context[FS_TYPE_FATFS].extend; 507 508 // get cluster containing FAT mapper 509 fat_cxy = GET_CXY( loc_ctx->fat_mapper_xp ); 510 511 // get local pointer on FATFS context in FAT cluster 512 fat_ctx = hal_remote_lpt( XPTR( fat_cxy , &fs_context[FS_TYPE_FATFS].extend ) ); 513 514 // build extended pointers free_lock, free_clusters, and free_cluster_hint 515 hint_xp = XPTR( fat_cxy , &fat_ctx->free_cluster_hint ); 516 numb_xp = XPTR( fat_cxy , &fat_ctx->free_clusters ); 506 // build extended pointers on free_clusters, and free_cluster_hint 507 hint_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_cluster_hint ); 508 numb_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_clusters ); 517 509 518 510 // get current value of free_cluster_hint and free_clusters … … 521 513 522 514 // update free_cluster_hint if required 523 if ( cluster < (hint + 1)) hal_remote_s32( hint_xp , (cluster - 1) );515 if ( (cluster - 1) < hint ) hal_remote_s32( hint_xp , (cluster - 1) ); 524 516 525 517 // update free_clusters … … 542 534 // It does NOT update the FS on the IOC device. 543 535 ////////////////////////////////////////////////////////////////////////////////////////// 544 // @ fat_mapper_xp : extended pointer on FAT mapper. 545 // @ cluster : cluster index in FAT. 536 // @ mapper_cxy : FAT mapper cluster identifier. 537 // @ mapper_ptr : local pointer on FAT mapper. 538 // @ fatfs_ctx : local pointer on FATFS context in FAT cluster. 539 // @ cluster : index of cluster to be released from FAT mapper. 
546 540 // @ return 0 if success / return -1 if error (cannot access FAT) 547 541 ////////////////////////////////////////////////////////////////////////////////////////// 548 static error_t fatfs_recursive_release( xptr_t fat_mapper_xp, 549 uint32_t cluster ) 542 static error_t fatfs_recursive_release( cxy_t mapper_cxy, 543 mapper_t * mapper_ptr, 544 fatfs_ctx_t * fatfs_ctx, 545 uint32_t cluster ) 550 546 { 551 547 uint32_t next; 552 548 553 // get next cluster from FAT mapper 554 if ( mapper_remote_get_32( fat_mapper_xp , cluster , &next ) ) return -1; 549 // build extended pointer on FAT mapper 550 xptr_t mapper_xp = XPTR( mapper_cxy , mapper_ptr ); 551 552 // get next cluster index from FAT mapper 553 if ( mapper_remote_get_32( mapper_xp, 554 cluster, 555 &next ) ) return -1; 555 556 556 557 #if (DEBUG_FATFS_RELEASE_INODE & 1) … … 564 565 { 565 566 // call fatfs_recursive_release() on next cluster 566 if ( fatfs_recursive_release( fat_mapper_xp , next ) ) return -1; 567 if ( fatfs_recursive_release( mapper_cxy, 568 mapper_ptr, 569 fatfs_ctx, 570 next ) ) return -1; 567 571 } 568 572 569 573 // update current cluster in FAT mapper 570 if ( mapper_remote_set_32( fat_mapper_xp, cluster , FREE_CLUSTER ) ) return -1; 574 if ( mapper_remote_set_32( mapper_xp, 575 cluster, 576 FREE_CLUSTER ) ) return -1; 571 577 572 578 // Update free_cluster_hint and free_clusters in FAT context 573 fatfs_free_clusters_increment( cluster ); 579 fatfs_free_clusters_increment( mapper_cxy, 580 fatfs_ctx, 581 cluster ); 574 582 575 583 return 0; … … 582 590 ////////////////////////////////////////////////////////////////////////////////////////// 583 591 584 ////////////////////////////// 585 void fatfs_ctx_display( void ) 586 { 587 // get pointer on local FATFS context 588 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS]; 589 fatfs_ctx_t * fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend; 590 592 /////////////////////////////////////////// 593 void fatfs_ctx_display( fatfs_ctx_t * ctx ) 594 { 591 595 printk("\n*** FAT context ***\n" 592 596 "- fat_sectors = %d\n" … … 599 603 "- free_cluster_hint = %d\n" 600 604 "- fat_mapper_xp = %l\n", 601 fatfs_ctx->fat_sectors_count,602 fatfs_ctx->bytes_per_sector,603 fatfs_ctx->sectors_per_cluster * fatfs_ctx->bytes_per_sector,604 fatfs_ctx->fat_begin_lba,605 fatfs_ctx->cluster_begin_lba,606 fatfs_ctx->root_dir_cluster,607 fatfs_ctx->free_clusters,608 fatfs_ctx->free_cluster_hint,609 fatfs_ctx->fat_mapper_xp );610 611 } // end fatfs_ctx_display()605 ctx->fat_sectors_count, 606 ctx->bytes_per_sector, 607 ctx->sectors_per_cluster * ctx->bytes_per_sector, 608 ctx->fat_begin_lba, 609 ctx->cluster_begin_lba, 610 ctx->root_dir_cluster, 611 ctx->free_clusters, 612 ctx->free_cluster_hint, 613 ctx->fat_mapper_xp ); 614 615 } // end ctx_display() 612 616 613 617 ////////////////////////////////////////// … … 659 663 uint32_t * buffer; // pointer on current page (array of uint32_t) 660 664 uint32_t current_page_index; // index of current page in FAT 661 uint32_t current_ page_offset; // offsetof slot in current page665 uint32_t current_slot_index; // index of slot in current page 662 666 uint32_t page_count_in_file; // index of page in file (index in linked list) 663 667 uint32_t next_cluster_id; // content of current FAT slot … … 670 674 thread_t * this = CURRENT_THREAD; 671 675 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 672 printk("\n[%s] thread[%x,%x] enter / first_cluster_id %d / searched_index / cycle %d\n",676 printk("\n[%s] thread[%x,%x] enter / first_cluster_id %d / searched_index %d / 
cycle %d\n", 673 677 __FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_index, cycle ); 674 678 #endif … … 678 682 679 683 // get extended pointer and cluster on FAT mapper 680 xptr_t mapper_xp = ctx->fat_mapper_xp;681 cxy_t mapper_cxy = GET_CXY(mapper_xp );684 xptr_t fat_mapper_xp = ctx->fat_mapper_xp; 685 cxy_t fat_mapper_cxy = GET_CXY( fat_mapper_xp ); 682 686 683 687 // initialize loop variable (1024 slots per page) 684 688 current_page_index = first_cluster_id >> 10; 685 current_ page_offset= first_cluster_id & 0x3FF;689 current_slot_index = first_cluster_id & 0x3FF; 686 690 page_count_in_file = 0; 687 691 next_cluster_id = 0xFFFFFFFF; 688 692 689 // scan FAT (i.e. traverse FAT linked list)693 // scan FAT mapper (i.e. traverse FAT linked list) 690 694 while( page_count_in_file < searched_page_index ) 691 695 { 692 // get pointer on current page descriptor 693 current_page_xp = mapper_remote_get_page( mapper_xp , current_page_index );696 // get pointer on current page descriptor in FAT mapper 697 current_page_xp = mapper_remote_get_page( fat_mapper_xp , current_page_index ); 694 698 695 699 if( current_page_xp == XPTR_NULL ) 696 700 { 697 // TODO701 printk("\n[ERROR] in %s : cannot get next page from FAT mapper\n", __FUNCTION__); 698 702 return -1; 699 703 } … … 704 708 705 709 // get FAT slot content 706 next_cluster_id = hal_remote_l32( XPTR( mapper_cxy , &buffer[current_page_offset] ) ); 710 next_cluster_id = hal_remote_l32( XPTR( fat_mapper_cxy, 711 &buffer[current_slot_index] ) ); 707 712 708 713 #if (DEBUG_FATFS_GET_CLUSTER & 1) 709 714 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 710 715 printk("\n[%s] traverse FAT / current_page_index = %d\n" 711 "current_ page_offset= %d / next_cluster_id = %d\n",712 __FUNCTION__, current_page_index, current_ page_offset, next_cluster_id );716 "current_slot_index = %d / next_cluster_id = %d\n", 717 __FUNCTION__, current_page_index, current_slot_index , next_cluster_id ); 713 718 #endif 714 719 715 720 // update loop variables 716 current_page_index 717 current_ page_offset= next_cluster_id & 0x3FF;721 current_page_index = next_cluster_id >> 10; 722 current_slot_index = next_cluster_id & 0x3FF; 718 723 page_count_in_file++; 719 724 } 720 725 721 if( next_cluster_id == 0xFFFFFFFF ) return -1; 726 if( next_cluster_id == 0xFFFFFFFF ) 727 { 728 printk("\n[ERROR] in %s : searched_cluster_id not found in FAT\n", __FUNCTION__ ); 729 return -1; 730 } 722 731 723 732 #if DEBUG_FATFS_GET_CLUSTER … … 759 768 760 769 #if DEBUG_FATFS_CTX_INIT 761 uint32_t cycle = (uint32_t)hal_get_cycles(); 770 uint32_t cycle = (uint32_t)hal_get_cycles(); 771 thread_t * this = CURRENT_THREAD; 762 772 if( DEBUG_FATFS_CTX_INIT < cycle ) 763 773 printk("\n[%s] thread[%x,%x] enter for fatfs_ctx = %x / cycle %d\n", … … 766 776 767 777 // check argument 768 assert( (fatfs_ctx != NULL) , "pointer on FATFS context is NULL \n" );778 assert( (fatfs_ctx != NULL) , "pointer on FATFS context is NULL" ); 769 779 770 780 // check only cluster 0 does FATFS init 771 assert( (local_cxy == 0) , "only cluster 0 can initialize FATFS \n");781 assert( (local_cxy == 0) , "only cluster 0 can initialize FATFS"); 772 782 773 783 // allocate a 512 bytes buffer to store the boot record … … 882 892 // WARNING : the inode field MUST be NULL for the FAT mapper 883 893 fat_mapper->inode = NULL; 894 884 895 885 896 // initialize the FATFS context … … 895 906 896 907 remote_queuelock_init( XPTR( local_cxy , &fatfs_ctx->free_lock ) , LOCK_FATFS_FREE ); 908 909 #if (DEBUG_FATFS_CTX_INIT & 
0x1) 910 if( DEBUG_FATFS_CTX_INIT < cycle ) 911 fatfs_ctx_display( fatfs_ctx ); 912 #endif 897 913 898 914 #if DEBUG_FATFS_CTX_INIT … … 1525 1541 xptr_t child_inode_xp ) 1526 1542 { 1527 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 1528 uint32_t index; // index of FAT32 directory entry in mapper 1529 mapper_t * mapper; // pointer on directory mapper 1530 uint32_t cluster; // directory entry cluster 1531 uint32_t size; // directory entry size 1532 bool_t is_dir; // directory entry type (file/dir) 1533 error_t error; 1543 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 1544 uint32_t index; // index of FAT32 directory entry in mapper 1545 mapper_t * mapper; // pointer on directory mapper 1546 uint32_t cluster; // directory entry cluster 1547 uint32_t size; // directory entry size 1548 bool_t is_dir; // directory entry type (file/dir) 1549 xptr_t root_xp; // extended pointer on root of parent dentries 1550 xptr_t iter_xp; // iterator for this list 1551 cxy_t child_inode_cxy; // child inode cluster 1552 vfs_inode_t * child_inode_ptr; // child inode local pointer 1553 xptr_t dentry_xp; // extended pointer on searched dentry descriptor 1554 cxy_t dentry_cxy; // cluster identifier of dentry (must be local_cxy) 1555 vfs_dentry_t * dentry_ptr; // local pointer 1556 error_t error; 1557 1558 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1534 1559 1535 1560 // check arguments … … 1539 1564 1540 1565 #if DEBUG_FATFS_GET_DENTRY 1541 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];1542 1566 uint32_t cycle = (uint32_t)hal_get_cycles(); 1543 1567 thread_t * this = CURRENT_THREAD; 1544 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );1568 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name ); 1545 1569 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1546 1570 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 1547 __FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );1548 #endif 1549 1550 // get pointer and index of searched directory entry in mapper1571 __FUNCTION__, this->process->pid, this->trdid, name , dir_name , cycle ); 1572 #endif 1573 1574 // get local pointer on parent mapper 1551 1575 mapper = parent_inode->mapper; 1576 1577 // get pointer and index in mapper for searched directory entry 1552 1578 error = fatfs_scan_directory( mapper, name , &entry , &index ); 1553 1579 1554 // update child inode and dentry descriptors if sucess 1555 if( error == 0 ) 1580 if( error ) 1556 1581 { 1582 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name ); 1583 printk("\n[ERROR] in %s : cannot find <%s> in parent mapper <%s>\n", 1584 __FUNCTION__, name , dir_name ); 1585 return -1; 1586 } 1587 1588 // get relevant infos from FAT32 directory entry 1589 cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) | 1590 (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 ) ) ; 1591 is_dir = (fatfs_get_record( DIR_ATTR , entry , 1 ) & ATTR_DIRECTORY); 1592 size = fatfs_get_record( DIR_FILE_SIZE , entry , 1 ); 1593 1594 // get child inode cluster and local pointer 1595 child_inode_cxy = GET_CXY( child_inode_xp ); 1596 child_inode_ptr = GET_PTR( child_inode_xp ); 1597 1598 // build extended pointer on root of list of parent dentries 1599 root_xp = XPTR( child_inode_cxy , &child_inode_ptr->parents ); 1600 1601 // check child inode has at least one parent 1602 assert( (xlist_is_empty( root_xp ) == false ), "child inode must have one parent\n"); 1603 1604 // scan list of parent dentries to 
search the parent_inode 1605 bool_t found = false; 1606 XLIST_FOREACH( root_xp , iter_xp ) 1607 { 1608 // get pointers on dentry 1609 dentry_xp = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents ); 1610 dentry_cxy = GET_CXY( dentry_xp ); 1611 dentry_ptr = GET_PTR( dentry_xp ); 1612 1613 // get local pointer on current parent directory inode 1614 vfs_inode_t * current = hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->parent ) ); 1615 1616 // check if current parent is the searched parent 1617 if( XPTR( dentry_cxy , current ) == XPTR( local_cxy , parent_inode ) ) 1618 { 1619 found = true; 1620 break; 1621 } 1622 } 1623 1624 if( found == false ) 1625 { 1626 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name ); 1627 printk("\n[ERROR] in %s : cannot find <%s> directory in list of parents for <%s>\n", 1628 __FUNCTION__, dir_name, name ); 1629 return -1; 1630 } 1631 1632 // update the child inode "type", "size", and "extend" fields 1633 vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE; 1634 1635 hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->type ) , type ); 1636 hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->size ) , size ); 1637 hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->extend ) , cluster ); 1638 1639 // update the dentry "extend" field 1640 dentry_ptr->extend = (void *)(intptr_t)index; 1557 1641 1558 1642 #if DEBUG_FATFS_GET_DENTRY 1559 1643 cycle = (uint32_t)hal_get_cycles(); 1560 1644 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1561 printk("\n[%s] thread[%x,%x] exit / intialised child <%s> in %s / cycle %d\n", 1562 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle ); 1563 #endif 1564 // get relevant infos from FAT32 directory entry 1565 cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) | 1566 (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 ) ) ; 1567 is_dir = (fatfs_get_record( DIR_ATTR , entry , 1 ) & ATTR_DIRECTORY); 1568 size = fatfs_get_record( DIR_FILE_SIZE , entry , 1 ); 1569 1570 // get child inode cluster and local pointer 1571 cxy_t inode_cxy = GET_CXY( child_inode_xp ); 1572 vfs_inode_t * inode_ptr = GET_PTR( child_inode_xp ); 1573 1574 // build extended pointer on root of list of prent dentries 1575 xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents ); 1576 1577 // check child inode has at least one parent 1578 assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n"); 1579 1580 // get dentry pointers and cluster 1581 xptr_t dentry_xp = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents ); 1582 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 1583 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 1584 1585 // check dentry descriptor in same cluster as parent inode 1586 assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" ); 1587 1588 // update the child inode "type", "size", and "extend" fields 1589 vfs_inode_type_t type = (is_dir) ? 
INODE_TYPE_DIR : INODE_TYPE_FILE; 1590 1591 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , type ); 1592 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size ) , size ); 1593 hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster ); 1594 1595 // update the dentry "extend" field 1596 dentry_ptr->extend = (void *)(intptr_t)index; 1597 1598 return 0; 1599 } 1600 else 1601 { 1602 return -1; 1603 } 1645 printk("\n[%s] thread[%x,%x] exit / intialised inode & dentry for <%s> in <%s> / cycle %d\n", 1646 __FUNCTION__, this->process->pid, this->trdid, name, dir_name, cycle ); 1647 #endif 1648 1649 return 0; 1604 1650 1605 1651 } // end fatfs_new_dentry() … … 1615 1661 error_t error; 1616 1662 1663 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1664 1617 1665 // check arguments 1618 1666 assert( (inode != NULL) , "inode is NULL\n" ); … … 1621 1669 1622 1670 #if DEBUG_FATFS_UPDATE_DENTRY 1623 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH];1624 1671 uint32_t cycle = (uint32_t)hal_get_cycles(); 1625 1672 thread_t * this = CURRENT_THREAD; 1626 1673 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 1627 1674 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 1628 printk("\n[%s] thread[%x,%x] enter for entry <%s> in dir <%s>/ cycle %d\n",1629 __FUNCTION__, this->process->pid, this->trdid, d entry->name , dir_name, cycle );1630 #endif 1631 1632 // get pointer and index of searched directory entry in mapper1675 printk("\n[%s] thread[%x,%x] enter for <%s/%s> / size %d / cycle %d\n", 1676 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, size, cycle ); 1677 #endif 1678 1679 // get local pointer on mapper 1633 1680 mapper = inode->mapper; 1681 1682 // get pointer and index in mapper for searched directory entry 1634 1683 error = fatfs_scan_directory( mapper, dentry->name , &entry , &index ); 1635 1684 1636 // update size in mapper if found 1637 if( error == 0 ) 1685 if( error ) 1638 1686 { 1687 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 1688 printk("\n[ERROR] in %s : cannot find <%s> in parent mapper <%s>\n", 1689 __FUNCTION__, dentry->name, dir_name ); 1690 return -1; 1691 } 1692 1693 // set size in FAT32 directory entry 1694 fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size ); 1695 1696 // get local pointer on modified page base 1697 void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK)); 1698 1699 // get extended pointer on modified page descriptor 1700 xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) ); 1701 1702 // synchronously update the modified page on device 1703 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1704 1705 if( error ) 1706 { 1707 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 1708 printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n", 1709 __FUNCTION__, dir_name ); 1710 return -1; 1711 } 1639 1712 1640 1713 #if DEBUG_FATFS_UPDATE_DENTRY 1641 1714 cycle = (uint32_t)hal_get_cycles(); 1642 1715 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 1643 printk("\n[%s] thread[%x,%x] exit / found entry <%s> in <%s> / cycle %d\n", 1644 __FUNCTION__, this->process->pid, this->trdid, dentry->name, dir_name, cycle ); 1645 #endif 1646 // set size in FAT32 directory entry 1647 fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size ); 1648 1649 // get local pointer on modified page base 1650 void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK)); 1651 1652 // get extended pointer on modified page descriptor 1653 xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) ); 1654 1655 // mark page as dirty 1656 
ppm_page_do_dirty( page_xp ); 1657 1658 return 0; 1659 } 1660 else 1661 { 1662 return -1; 1663 } 1716 printk("\n[%s] thread[%x,%x] exit / updated size for <%s/%s> / cycle %d\n", 1717 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle ); 1718 #endif 1719 1720 return 0; 1664 1721 1665 1722 } // end fatfs_update_dentry() … … 2044 2101 error_t fatfs_cluster_alloc( uint32_t * searched_cluster ) 2045 2102 { 2046 uint32_t page_id; // page index in mapper2103 uint32_t page_id; // page index in FAT mapper 2047 2104 uint32_t slot_id; // slot index in page (1024 slots per page) 2048 uint32_t hint;// first free cluster index in FAT2105 uint32_t cluster; // first free cluster index in FAT 2049 2106 uint32_t free_clusters; // total number of free clusters 2050 2107 vfs_ctx_t * vfs_ctx; // local pointer on VFS context (same in all clusters) … … 2080 2137 fat_fatfs_ctx = hal_remote_lpt( XPTR( mapper_cxy , &vfs_ctx->extend ) ); 2081 2138 2082 // build relevant extended pointers in on free clusters info in FATcluster2139 // build relevant extended pointers on free clusters info in mapper cluster 2083 2140 lock_xp = XPTR( mapper_cxy , &fat_fatfs_ctx->free_lock ); 2084 2141 hint_xp = XPTR( mapper_cxy , &fat_fatfs_ctx->free_cluster_hint ); … … 2089 2146 2090 2147 // get hint and free_clusters values from FATFS context 2091 hint = hal_remote_l32( hint_xp );2148 cluster = hal_remote_l32( hint_xp ) + 1; 2092 2149 free_clusters = hal_remote_l32( numb_xp ); 2093 2150 2094 // get page index & slot index for the first free cluster2095 page_id = (hint + 1) >> 10;2096 slot_id = (hint + 1) & 0x3FF;2097 2098 // get relevant page from mapper2099 page_xp = mapper_remote_get_page( mapper_xp , page_id );2100 2101 if( page_xp == XPTR_NULL )2102 {2103 printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ );2104 return -1;2105 }2106 2107 // build extended pointer on free cluster slot2108 slot_xp = ppm_page2base( page_xp ) + (slot_id<<2);2109 2110 2151 #if (DEBUG_FATFS_CLUSTER_ALLOC & 1) 2111 2152 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle ) 2112 printk("\n[%s] thread[%x,%x] get free info /hint %x / free_clusters %x\n",2113 __FUNCTION__, this->process->pid, this->trdid, hint, free_clusters );2153 printk("\n[%s] thread[%x,%x] get free info : hint %x / free_clusters %x\n", 2154 __FUNCTION__, this->process->pid, this->trdid, (cluster - 1), free_clusters ); 2114 2155 #endif 2115 2156 … … 2127 2168 } 2128 2169 2129 // check "hint" 2170 2171 2172 // get page index & slot index for selected cluster 2173 page_id = cluster >> 10; 2174 slot_id = cluster & 0x3FF; 2175 2176 // get relevant page descriptor from mapper 2177 page_xp = mapper_remote_get_page( mapper_xp , page_id ); 2178 2179 if( page_xp == XPTR_NULL ) 2180 { 2181 printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ ); 2182 return -1; 2183 } 2184 2185 // build extended pointer on selected cluster slot in FAT mapper 2186 slot_xp = ppm_page2base( page_xp ) + (slot_id << 2); 2187 2188 // check selected cluster actually free 2130 2189 if( hal_remote_l32( slot_xp ) != FREE_CLUSTER ) 2131 2190 { 2132 printk("\n[ERROR] in %s : illegal hint cluster\n", __FUNCTION__);2191 printk("\n[ERROR] in %s : selected cluster %x not free\n", __FUNCTION__, cluster ); 2133 2192 remote_queuelock_acquire( lock_xp ); 2134 2193 return -1; 2135 2194 } 2136 2195 2137 // update allocated cluster in FAT mapper 2138 hal_remote_s32( slot_xp , END_OF_CHAIN_CLUSTER_MAX ); 2139 2140 // update free cluster info 2141 fatfs_free_clusters_decrement( hint + 1 ); 2196 // 
update free cluster info in FATFS context 2197 fatfs_free_clusters_decrement( mapper_cxy , fat_fatfs_ctx , cluster ); 2142 2198 2143 2199 // release free clusters busylock 2144 2200 remote_queuelock_release( lock_xp ); 2201 2202 // update FAT mapper 2203 hal_remote_s32( slot_xp , END_OF_CHAIN_CLUSTER_MAX ); 2204 2205 // synchronously update FAT on device 2206 fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2145 2207 2146 2208 #if DEBUG_FATFS_CLUSTER_ALLOC 2147 2209 cycle = (uint32_t)hal_get_cycles(); 2148 2210 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle ) 2149 printk("\n[%s] thread[%x,%x] exit / cluster %x/ cycle %d\n",2150 __FUNCTION__, this->process->pid, this->trdid, hint + 1, cycle );2151 #endif 2152 2153 *searched_cluster = hint + 1;2211 printk("\n[%s] thread[%x,%x] exit / updated cluster %x in FAT / cycle %d\n", 2212 __FUNCTION__, this->process->pid, this->trdid, cluster, cycle ); 2213 #endif 2214 2215 *searched_cluster = cluster; 2154 2216 return 0; 2155 2217 … … 2164 2226 xptr_t mapper_xp; // extended pointer on FAT mapper 2165 2227 cxy_t mapper_cxy; // Fat mapper cluster identifier 2228 mapper_t * mapper_ptr; // local pointer on FAT mapper 2166 2229 xptr_t lock_xp; // extended pointer on lock protecting free clusters info. 2167 2230 xptr_t first_xp; // extended pointer on inode extension … … 2204 2267 loc_fatfs_ctx = vfs_ctx->extend; 2205 2268 2206 // get extended pointerand cluster on FAT mapper2269 // get pointers and cluster on FAT mapper 2207 2270 mapper_xp = loc_fatfs_ctx->fat_mapper_xp; 2208 2271 mapper_cxy = GET_CXY( mapper_xp ); 2272 mapper_ptr = GET_PTR( mapper_xp ); 2209 2273 2210 2274 // get local pointer on FATFS context in FAT cluster … … 2218 2282 2219 2283 // call the recursive function to release all clusters from FAT mapper 2220 if ( fatfs_recursive_release( mapper_xp , first_cluster ) ) 2284 if ( fatfs_recursive_release( mapper_cxy, 2285 mapper_ptr, 2286 fat_fatfs_ctx, 2287 first_cluster ) ) 2221 2288 { 2222 2289 printk("\n[ERROR] in %s : cannot update FAT mapper\n", __FUNCTION__ ); … … 2300 2367 { 2301 2368 // get lba from FATFS context and page_id 2302 uint32_t lba 2369 uint32_t lba = fatfs_ctx->fat_begin_lba + (page_id << 3); 2303 2370 2304 2371 // access device … … 2311 2378 if( error ) return EIO; 2312 2379 2313 #if (DEBUG_FATFS_MOVE_PAGE & 0x1)2314 if( DEBUG_FATFS_MOVE_PAGE < cycle )2315 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id );2316 #endif2317 2318 2380 #if DEBUG_FATFS_MOVE_PAGE 2319 cycle = (uint32_t)hal_get_cycles();2320 2381 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2321 2382 { … … 2357 2418 page_id, 2358 2419 &searched_cluster ); 2359 if( error ) return EIO; 2420 if( error ) 2421 { 2422 printk("\n[ERROR] in %s : cannot access FAT mapper\n", __FUNCTION__ ); 2423 return -1; 2424 } 2360 2425 } 2361 2426 2362 2427 // get lba from searched_cluster 2363 2428 uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster ); 2429 2430 #if DEBUG_FATFS_MOVE_PAGE 2431 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2432 { 2433 if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) ) 2434 printk("\n[%s] thread[%x,%x] load page %d of <%s> / cluster_id %x / cycle %d\n", 2435 __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle ); 2436 else 2437 printk("\n[%s] thread[%x,%x] sync page %d of <%s> / cluster_id %x / cycle %d\n", 2438 __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle ); 2439 } 2440 #endif 2364 2441 2365 2442 // access device … … 2370 2447 else error = -1; 2371 2448 2372 if( error ) 
return EIO; 2373 2374 #if (DEBUG_FATFS_MOVE_PAGE & 0x1) 2375 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2376 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id ); 2377 #endif 2378 2379 #if DEBUG_FATFS_MOVE_PAGE 2380 cycle = (uint32_t)hal_get_cycles(); 2381 if(DEBUG_FATFS_MOVE_PAGE < cycle) 2382 { 2383 if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) ) 2384 printk("\n[%s] thread[%x,%x] load page %d of <%s> inode / cycle %d\n", 2385 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); 2386 else 2387 printk("\n[%s] thread[%x,%x] sync page %d of <%s> inode / cycle %d\n", 2388 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); 2389 } 2390 #endif 2391 2449 if( error ) 2450 { 2451 printk("\n[ERROR] in %s : cannot access device\n", __FUNCTION__ ); 2452 return -1; 2453 } 2392 2454 } 2393 2455 -
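The fatfs_get_cluster() hunks above walk the FAT as a linked list stored in the FAT mapper: 1024 slots of 32 bits per 4 Kbytes page, with page_id = cluster >> 10 and slot_id = cluster & 0x3FF. A minimal standalone model of this walk, assuming a flat in-memory FAT array in place of the remote mapper (the end-of-chain test below is the model's own simplification, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define FREE_CLUSTER  0x00000000
#define EOC_MIN       0x0FFFFFF8   /* assumed FAT32 end-of-chain range (model only) */

/* Returns the FATFS cluster index holding page <page_id> of a file starting
 * in cluster <first>, or 0xFFFFFFFF when the chain is shorter than expected.
 * In the kernel each fat[] access is a remote read in the FAT mapper, with
 * page_id = cluster >> 10 and slot_id = cluster & 0x3FF (1024 slots/page). */
static uint32_t fat_get_cluster( const uint32_t * fat,
                                 uint32_t         first,
                                 uint32_t         page_id )
{
    uint32_t current = first;
    uint32_t count;

    for( count = 0 ; count < page_id ; count++ )
    {
        uint32_t next = fat[current];                     /* follow the link */
        if( (next == FREE_CLUSTER) || (next >= EOC_MIN) ) return 0xFFFFFFFF;
        current = next;
    }
    return current;
}

int main( void )
{
    uint32_t fat[16] = { 0 };          /* tiny FAT : chain 2 -> 5 -> 7 -> EOC */
    fat[2] = 5;  fat[5] = 7;  fat[7] = 0x0FFFFFFF;

    printf( "page 2 is in cluster %u\n", (unsigned)fat_get_cluster( fat , 2 , 2 ) );
    return 0;
}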
trunk/kernel/fs/fatfs.h
r623 r625 35 35 // The FATFS File System implements a FAT32 read/write file system. 36 36 // 37 // The FATFS extensions to the generic VFS are the following:37 // The FATFS specific extensions to the generic VFS are the following: 38 38 // 39 39 // 1) The vfs_ctx_t "extend" field is a void* pointing on the fatfs_ctx_t structure. … … 190 190 uint32_t root_dir_cluster; /*! cluster index for root directory */ 191 191 xptr_t fat_mapper_xp; /*! extended pointer on FAT mapper */ 192 uint32_t free_cluster_hint; /*! start point to search free cluster*/192 uint32_t free_cluster_hint; /*! cluster[hint+1] is the first free */ 193 193 uint32_t free_clusters; /*! free clusters number */ 194 194 remote_queuelock_t free_lock; /*! exclusive access to hint & number */ … … 224 224 225 225 /***************************************************************************************** 226 * This function display the content of the FATFS context. 227 ****************************************************************************************/ 228 void fatfs_ctx_display( void ); 226 * This function display the content of the local FATFS context. 227 ***************************************************************************************** 228 * @ ctx : local pointer on the context. 229 ****************************************************************************************/ 230 void fatfs_ctx_display( fatfs_ctx_t * ctx ); 229 231 230 232 /***************************************************************************************** … … 312 314 ***************************************************************************************** 313 315 * It initializes a new inode/dentry couple in Inode Tree, attached to the directory 314 * identified by the <parent_inode> argument. The newdirectory entry is identified315 * by the <name> argument. The child inode descriptor identified by the <child_inode_xp>316 * argument, and the dentry descriptor must have been previously allocated.316 * identified by the <parent_inode> argument. The directory entry is identified 317 * by the <name> argument. The child inode descriptor, identified by the <child_inode_xp> 318 * argument, and the associated dentry descriptor must have been previously allocated. 317 319 * It scan the parent mapper to find the <name> argument. 318 * It set the "type", "size", and "extend" fields in inode descriptor.319 * It set the " extend" field in dentry descriptor.320 * It set the "type", "size", and "extend" fields in the child inode descriptor. 321 * It set the " extend" field in the dentry descriptor. 320 322 * It must be called by a thread running in the cluster containing the parent inode. 321 323 ***************************************************************************************** … … 333 335 ***************************************************************************************** 334 336 * It update the size of a directory entry identified by the <dentry> argument in 335 * the mapper of a directory identified by the <inode> argument, as defined by the <size>336 * argument.337 * the mapper of a directory identified by the <inode> argument, as defined by the 338 * <size> argument. 337 339 * It scan the mapper to find the entry identified by the dentry "name" field. 338 340 * It set the "size" field in the in the directory mapper AND marks the page as DIRTY. … … 427 429 * in <searched_cluster> the FATFS cluster index of a free cluster. 
428 430 * It can be called by a thread running in any cluster, as it uses remote access 429 * primitives when the FAT mapper is remote. It takes the "free_lock" stored in the 430 * FATFS context located in the same cluster as the FAT mapper itself, to get exclusive 431 * access to the FAT. It uses (and updates) the <free_cluster_hint> and <free_clusters> 432 * shared variables in this FATFS context. 433 * It updates the FAT mapper, and synchronously updates the FAT region on IOC device. 434 * The FAT mapper being a cache, this function updates the FAT mapper from informations 435 * stored on IOC device in case of miss. 431 * primitives when the FAT mapper is remote. It takes the queuelock stored in the FATFS 432 * context (located in the same cluster as the FAT mapper itself), to get exclusive 433 * access to the FAT. It uses the <free_cluster_hint> and <free_clusters> variables 434 * stored in this FATFS context. 435 * - it updates the <free_cluster_hint> and <free_clusters> variables in FATFS context. 436 * - it updates the FAT mapper (handling miss from IOC device if required). 437 * - it synchronously updates the FAT region on IOC device. 438 * - it returns the allocated cluster index. 436 439 ***************************************************************************************** 437 440 * @ searched_cluster : [out] found FATFS cluster index. … … 461 464 * This function moves a page from/to the mapper to/from the FATFS file system on device. 462 465 * The page must have been previously allocated and registered in the mapper. 463 * The page - and the mapper - can be located in another cluster than the calling thread.464 466 * The pointer on the mapper and the page index in file are found in the page descriptor. 465 467 * It is used for both a regular file/directory mapper, and the FAT mapper. -
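The fatfs_cluster_alloc() contract above pins down an ordering: take the free_lock, consume the hint (cluster[hint+1] is the first free cluster, as the free_cluster_hint comment now states), update the counters under the lock, then update the FAT mapper and push it synchronously to the IOC device. A standalone sketch of that bookkeeping, assuming a flat in-memory FAT and a pthread mutex in place of the remote_queuelock:

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define FAT_SLOTS     1024
#define FREE_CLUSTER  0x00000000
#define END_OF_CHAIN  0x0FFFFFFF

typedef struct
{
    pthread_mutex_t lock;            /* models the FATFS free_lock        */
    uint32_t        hint;            /* cluster[hint+1] is the first free */
    uint32_t        free_clusters;   /* number of free clusters           */
    uint32_t        fat[FAT_SLOTS];  /* models the FAT mapper             */
}
fat_model_t;

/* returns 0 and the allocated index in *found / returns -1 if no free cluster */
static int fat_model_alloc( fat_model_t * f , uint32_t * found )
{
    pthread_mutex_lock( &f->lock );

    uint32_t cluster = f->hint + 1;                 /* first candidate */

    if( (f->free_clusters == 0) || (f->fat[cluster] != FREE_CLUSTER) )
    {
        pthread_mutex_unlock( &f->lock );           /* release on error path */
        return -1;
    }

    f->free_clusters--;

    /* hint update : find the next free slot, and store (free - 1) */
    uint32_t i;
    for( i = cluster + 1 ; i < FAT_SLOTS ; i++ )
    {
        if( f->fat[i] == FREE_CLUSTER ) { f->hint = i - 1; break; }
    }

    pthread_mutex_unlock( &f->lock );

    /* in the kernel : hal_remote_s32() in the FAT mapper, then a
     * synchronous fatfs_move_page( page_xp , IOC_SYNC_WRITE )     */
    f->fat[cluster] = END_OF_CHAIN;

    *found = cluster;
    return 0;
}

int main( void )
{
    fat_model_t f = { .lock = PTHREAD_MUTEX_INITIALIZER,
                      .hint = 1, .free_clusters = FAT_SLOTS - 2 };
    f.fat[0] = f.fat[1] = END_OF_CHAIN;   /* clusters 0 and 1 are reserved */

    uint32_t c;
    if( fat_model_alloc( &f , &c ) == 0 ) printf( "allocated cluster %u\n", (unsigned)c );
    return 0;
}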
trunk/kernel/fs/vfs.c
r623 r625 175 175 else 176 176 { 177 ctx = NULL;178 assert( false , "illegal file system type = %d\n" , fs_type );177 printk("\n[ERROR] in %s : illegal FS type\n", __FUNCTION__ ); 178 return -1; 179 179 } 180 180 … … 185 185 { 186 186 printk("\n[ERROR] in %s : cannot allocate inum\n", __FUNCTION__ ); 187 return ENOMEM;187 return -1; 188 188 } 189 189 … … 378 378 { 379 379 380 assert( (inode != NULL) , "inode pointer is NULL \n" );380 assert( (inode != NULL) , "inode pointer is NULL" ); 381 381 382 382 uint32_t page_id; … … 386 386 uint32_t size = inode->size; 387 387 388 assert( (mapper != NULL) , "mapper pointer is NULL \n" );388 assert( (mapper != NULL) , "mapper pointer is NULL" ); 389 389 390 390 #if DEBUG_VFS_INODE_LOAD_ALL … … 560 560 void vfs_file_destroy( vfs_file_t * file ) 561 561 { 562 563 // check refcount564 // assert( (file->refcount == 0) , "refcount non zero\n" );565 566 562 kmem_req_t req; 567 563 req.ptr = file; … … 766 762 767 763 // check argument 768 assert( (file_xp != XPTR_NULL), "file_xp == XPTR_NULL \n" );764 assert( (file_xp != XPTR_NULL), "file_xp == XPTR_NULL" ); 769 765 770 766 // get cluster and local pointer on remote file descriptor … … 776 772 777 773 // check inode type 778 assert( (inode_type == INODE_TYPE_FILE), " inode type is not INODE_TYPE_FILE" );774 assert( (inode_type == INODE_TYPE_FILE), "bad inode type" ); 779 775 780 776 // get mapper pointer and file offset from file descriptor 781 777 file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) ); 782 mapper = (mapper_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );778 mapper = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) ); 783 779 784 780 // move data between mapper and buffer … … 788 784 buffer, 789 785 size ); 786 if( error ) 787 { 788 printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ ); 789 return -1; 790 } 790 791 791 792 // update file offset in file descriptor 792 793 hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->offset ) , size ); 793 794 794 if( error ) 795 { 796 return -1; 797 } 795 #if DEBUG_VFS_USER_MOVE 796 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 797 uint32_t cycle = (uint32_t)hal_get_cycles(); 798 thread_t * this = CURRENT_THREAD; 799 vfs_inode_t * inode = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 800 vfs_inode_get_name( XPTR( file_cxy , inode ) , name ); 801 if( cycle > DEBUG_VFS_USER_MOVE ) 802 { 803 if( to_buffer ) 804 printk("\n[%s] thread[%x,%x] moves %d bytes from <%s> mapper to buffer (%x) / cycle %d\n", 805 __FUNCTION__ , this->process->pid, this->trdid, size, name, buffer ); 806 else 807 printk("\n[%s] thread[%x,%x] moves %d bytes from buffer (%x) to <%s> mapper / cycle %d\n", 808 __FUNCTION__ , this->process->pid, this->trdid, size, buffer, name ); 809 } 810 #endif 798 811 799 812 return size; … … 816 829 817 830 // check argument 818 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL \n" );831 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); 819 832 820 833 // get cluster and local pointer on remote file descriptor … … 825 838 inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 826 839 827 // action depends on inode type 828 if( inode_type == INODE_TYPE_FILE ) 829 { 830 // get mapper pointers and file offset from file descriptor 831 file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) ); 832 mapper_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) ); 833 mapper_xp = XPTR( file_cxy , mapper_ptr ); 834 835 // move data between mapper and buffer 836 error = 
mapper_move_kernel( mapper_xp, 837 to_buffer, 838 file_offset, 839 buffer_xp, 840 size ); 841 if( error ) return -1; 842 } 843 else 844 { 845 printk("\n[ERROR] in %s : inode is not a file", __FUNCTION__ ); 840 // check inode type 841 assert( (inode_type == INODE_TYPE_FILE), "bad file type" ); 842 843 // get mapper pointers and file offset from file descriptor 844 file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) ); 845 mapper_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) ); 846 mapper_xp = XPTR( file_cxy , mapper_ptr ); 847 848 // move data between mapper and buffer 849 error = mapper_move_kernel( mapper_xp, 850 to_buffer, 851 file_offset, 852 buffer_xp, 853 size ); 854 if( error ) 855 { 856 printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ ); 846 857 return -1; 847 858 } 859 860 #if DEBUG_VFS_KERNEL_MOVE 861 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 862 uint32_t cycle = (uint32_t)hal_get_cycles(); 863 thread_t * this = CURRENT_THREAD; 864 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 865 void * buffer_ptr = GET_PTR( buffer_xp ); 866 vfs_inode_t * inode = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 867 vfs_inode_get_name( XPTR( file_cxy , inode ) , name ); 868 if( cycle > DEBUG_VFS_KERNEL_MOVE ) 869 { 870 if( to_buffer ) 871 printk("\n[%s] thread[%x,%x] moves %d bytes from <%s> mapper to buffer(%x,%x) / cycle %d\n", 872 __FUNCTION__ , this->process->pid, this->trdid, size, name, buffer_cxy, buffer_ptr ); 873 else 874 printk("\n[%s] thread[%x,%x] moves %d bytes from buffer(%x,%x) to <%s> mapper / cycle %d\n", 875 __FUNCTION__ , this->process->pid, this->trdid, size, buffer_cxy, buffer_ptr, name ); 876 } 877 #endif 848 878 849 879 return 0; … … 866 896 867 897 // check argument 868 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL \n" );898 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); 869 899 870 900 // get cluster and local pointer on remote file descriptor … … 946 976 947 977 // check argument 948 assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL \n" );978 assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL" ); 949 979 950 980 thread_t * this = CURRENT_THREAD; … … 997 1027 #endif 998 1028 999 //////// 2) update file size in all parent directory mapper(s) and ondevice1029 //////// 2) update file size in all parent directory mapper(s) and update device 1000 1030 1001 1031 // get pointers on remote inode … … 1052 1082 vfs_inode_get_name( XPTR( parent_cxy , parent_inode_ptr ) , parent_name ); 1053 1083 if( DEBUG_VFS_CLOSE < cycle ) 1054 printk("\n[%s] thread[%x,%x] updated size of <%s> in parent <%s>\n",1055 __FUNCTION__, process->pid, this->trdid, name, parent_name );1084 printk("\n[%s] thread[%x,%x] updated <%s> in <%s> / size = %d bytes\n", 1085 __FUNCTION__, process->pid, this->trdid, name, parent_name, size ); 1056 1086 #endif 1057 1087 … … 1114 1144 #if DEBUG_VFS_CLOSE 1115 1145 if( DEBUG_VFS_CLOSE < cycle ) 1116 printk("\n[%s] thread[%x,%x] reset all fd-array copies for <% x>\n",1146 printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%s>\n", 1117 1147 __FUNCTION__, process->pid, this->trdid, name ); 1118 1148 #endif … … 1132 1162 cycle = (uint32_t)hal_get_cycles(); 1133 1163 if( DEBUG_VFS_CLOSE < cycle ) 1134 printk("\n[%s] thread[%x,%x] exit / <%s> closed/ cycle %d\n",1135 __FUNCTION__, process->pid, this->trdid, name, cycle );1164 printk("\n[%s] thread[%x,%x] exit / closed <%s> in process %x / cycle %d\n", 1165 __FUNCTION__, process->pid, this->trdid, name, process->pid, cycle ); 1136 1166 #endif 1137 1167 … 
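The vfs_user_move() / vfs_kernel_move() hunks above, like most of this patch, manipulate remote objects through extended pointers: an xptr_t packs a cluster identifier (cxy) with a local pointer, is split with GET_CXY() / GET_PTR(), and is dereferenced with the hal_remote_* primitives. A self-contained model of the idiom (the struct packing and the purely local hal_remote_l32() are simplifications; the kernel packs both fields in a single 64-bit word, TSAR local pointers being 32 bits wide):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t cxy_t;

/* portable model of the extended pointer */
typedef struct { cxy_t cxy; void * ptr; } xptr_t;

static xptr_t XPTR( cxy_t cxy , void * ptr ) { xptr_t x = { cxy , ptr }; return x; }
static cxy_t  GET_CXY( xptr_t xp )           { return xp.cxy; }
static void * GET_PTR( xptr_t xp )           { return xp.ptr; }

/* local model of the remote 32-bit load : a plain dereference */
static uint32_t hal_remote_l32( xptr_t xp )
{
    (void)GET_CXY( xp );          /* the real HAL routes on the cluster id */
    return *(uint32_t *)GET_PTR( xp );
}

typedef struct { uint32_t offset; } file_model_t;

int main( void )
{
    file_model_t file    = { .offset = 42 };
    xptr_t       file_xp = XPTR( 3 , &file );   /* pretend cluster 3 */

    /* the vfs_kernel_move() pattern : split once, then build field pointers */
    cxy_t          file_cxy = GET_CXY( file_xp );
    file_model_t * file_ptr = GET_PTR( file_xp );

    printf( "offset = %u\n",
            (unsigned)hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) ) );
    return 0;
}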
… 2029 2059 vfs_inode_type_t inode_type; // target inode type 2030 2060 2031 // set lookup working mode 2032 assert( (rights == 0), __FUNCTION__, 2033 "access rights non implemented yet\n" ); 2061 // check lookup working mode 2062 assert( (rights == 0), "access rights non implemented yet" ); 2034 2063 2035 2064 // get extended pointer on target inode … … 2051 2080 // TODO implement this function 2052 2081 2053 assert( false , "not implemented \n" );2082 assert( false , "not implemented" ); 2054 2083 2055 2084 return 0; … … 2061 2090 uint32_t rights ) 2062 2091 { 2063 assert( false , "not implemented cwd_xp: %x, path <%s>, rights %x\n", 2064 cwd_xp, path, rights ); 2092 assert( false , "not implemented %l %x %x", cwd_xp, path, rights ); 2065 2093 return 0; 2066 2094 } … … 2084 2112 vfs_inode_type_t inode_type; 2085 2113 uint32_t inode_size; 2086 uint32_t inode_inum;2087 2114 uint32_t inode_attr; 2088 2115 uint32_t inode_dirty; 2116 void * inode_extd; 2117 2089 2118 xptr_t children_xp; // extended pointer on children xhtab 2090 2119 … … 2115 2144 " " }; // level 15 2116 2145 2117 assert( (inode_xp != XPTR_NULL) , "inode_xp cannot be NULL \n" );2118 assert( (name_xp != XPTR_NULL) , "name_xp cannot be NULL \n" );2119 assert( (indent < 16) , "depth cannot be larger than 15 \n" );2146 assert( (inode_xp != XPTR_NULL) , "inode_xp cannot be NULL" ); 2147 assert( (name_xp != XPTR_NULL) , "name_xp cannot be NULL" ); 2148 assert( (indent < 16) , "depth cannot be larger than 15" ); 2120 2149 2121 2150 // get current inode cluster and local pointer … … 2126 2155 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 2127 2156 inode_size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ); 2128 inode_inum = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->inum ) );2129 2157 inode_attr = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->attr ) ); 2158 inode_extd = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->extend ) ); 2130 2159 mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 2131 2160 … … 2137 2166 2138 2167 // display inode 2139 nolock_printk("%s<%s> : %s / inum%d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n",2140 indent_str[indent], name, vfs_inode_type_str( inode_type ),2141 inode_inum,inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr );2168 nolock_printk("%s<%s> : %s / extd %d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n", 2169 indent_str[indent], name, vfs_inode_type_str( inode_type ), (uint32_t)inode_extd, 2170 inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr ); 2142 2171 2143 2172 // scan directory entries when current inode is a directory … … 2405 2434 // check pathname / root_xp consistency 2406 2435 assert( ((pathname[0] != '/') || (root_xp == process->vfs_root_xp)), 2407 "root inode must be VFS root for path <%s> \n", pathname );2436 "root inode must be VFS root for path <%s>", pathname ); 2408 2437 2409 2438 #if DEBUG_VFS_LOOKUP … … 2550 2579 if ( error ) // child not found in parent mapper 2551 2580 { 2552 if ( last && create ) // add a brand new dentry in parent 2581 if ( last && create ) // add a brand new dentry in parent directory 2553 2582 { 2554 2583 error = vfs_new_dentry_init( parent_xp, … … 2705 2734 uint32_t child_size; 2706 2735 2707 #if DEBUG_VFS_NEW_ CHILD_INIT2736 #if DEBUG_VFS_NEW_DENTRY_INIT 2708 2737 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2709 2738 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; … … 2712 2741 uint32_t cycle = (uint32_t)hal_get_cycles(); 2713 2742 thread_t * this = CURRENT_THREAD; 
2714 if( DEBUG_VFS_NEW_ CHILD_INIT < cycle )2743 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle ) 2715 2744 printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n", 2716 2745 __FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle ); … … 2741 2770 } 2742 2771 2743 #if( DEBUG_VFS_NEW_ CHILD_INIT & 1)2744 if( DEBUG_VFS_NEW_ CHILD_INIT < cycle )2745 printk("\n[%s] thread[%x,%x] allocated one FAT clusterto <%s>\n",2746 __FUNCTION__ , this->process->pid, this->trdid, c hild_name );2772 #if( DEBUG_VFS_NEW_DENTRY_INIT & 1) 2773 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle ) 2774 printk("\n[%s] thread[%x,%x] allocated FAT cluster %x to <%s>\n", 2775 __FUNCTION__ , this->process->pid, this->trdid, cluster, child_name ); 2747 2776 #endif 2748 2777 … … 2775 2804 } 2776 2805 2777 #if DEBUG_VFS_NEW_ CHILD_INIT2806 #if DEBUG_VFS_NEW_DENTRY_INIT 2778 2807 cycle = (uint32_t)hal_get_cycles(); 2779 if( DEBUG_VFS_NEW_ CHILD_INIT < cycle )2808 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle ) 2780 2809 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n", 2781 2810 __FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle ); … … 3085 3114 3086 3115 // check buffer overflow 3087 assert( (index >= 0) , "kernel buffer too small \n" );3116 assert( (index >= 0) , "kernel buffer too small" ); 3088 3117 3089 3118 } … … 3111 3140 3112 3141 // check buffer overflow 3113 assert( (index >= 0) , "kernel buffer too small \n" );3142 assert( (index >= 0) , "kernel buffer too small" ); 3114 3143 3115 3144 // update pathname … … 3379 3408 error_t error = 0; 3380 3409 3381 assert( (page_xp != XPTR_NULL) , "page pointer is NULL \n" );3410 assert( (page_xp != XPTR_NULL) , "page pointer is NULL" ); 3382 3411 3383 3412 page_t * page_ptr = GET_PTR( page_xp ); … … 3387 3416 mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) ); 3388 3417 3389 assert( (mapper != NULL) , "no mapper for page \n" );3418 assert( (mapper != NULL) , "no mapper for page" ); 3390 3419 3391 3420 // get FS type … … 3407 3436 else 3408 3437 { 3409 assert( false , "undefined file system type \n" );3438 assert( false , "undefined file system type" ); 3410 3439 } 3411 3440 … … 3420 3449 error_t error = 0; 3421 3450 3422 assert( (inode != NULL) , "inode pointer is NULL \n" );3423 assert( (dentry != NULL) , "dentry pointer is NULL \n" );3451 assert( (inode != NULL) , "inode pointer is NULL" ); 3452 assert( (dentry != NULL) , "dentry pointer is NULL" ); 3424 3453 3425 3454 mapper_t * mapper = inode->mapper; 3426 3455 3427 assert( (mapper != NULL) , "mapper pointer is NULL \n" );3456 assert( (mapper != NULL) , "mapper pointer is NULL" ); 3428 3457 3429 3458 // get FS type … … 3445 3474 else 3446 3475 { 3447 assert( false , "undefined file system type \n" );3476 assert( false , "undefined file system type" ); 3448 3477 } 3449 3478 … … 3458 3487 error_t error = 0; 3459 3488 3460 assert( (inode != NULL) , "inode pointer is NULL \n" );3461 assert( (dentry != NULL) , "dentry pointer is NULL \n" );3489 assert( (inode != NULL) , "inode pointer is NULL" ); 3490 assert( (dentry != NULL) , "dentry pointer is NULL" ); 3462 3491 3463 3492 mapper_t * mapper = inode->mapper; 3464 3493 3465 assert( (mapper != NULL) , "mapper pointer is NULL \n" );3494 assert( (mapper != NULL) , "mapper pointer is NULL" ); 3466 3495 3467 3496 // get FS type … … 3483 3512 else 3484 3513 { 3485 assert( false , "undefined file system type \n" );3514 assert( false , "undefined file system type" ); 3486 3515 } 3487 3516 … … 
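The hunks around this point all touch the same family of helpers, where a generic vfs_fs_* entry point routes to the FS-specific implementation according to the context type; the patch mostly normalises their assert messages (no trailing '\n'). A standalone sketch of the shared shape, with vfs_fs_foo() / fatfs_foo() as placeholder names and the standard one-argument assert() in place of the kernel macro:

#include <assert.h>
#include <stdio.h>

typedef enum { FS_TYPE_FATFS, FS_TYPE_RAMFS, FS_TYPE_DEVFS } vfs_fs_type_t;
typedef int error_t;

/* placeholder for the FATFS-specific operation */
static error_t fatfs_foo( void ) { return 0; }

/* shape shared by the vfs_fs_* dispatch helpers */
static error_t vfs_fs_foo( vfs_fs_type_t fs_type )
{
    error_t error = 0;

    if( fs_type == FS_TYPE_FATFS )
    {
        error = fatfs_foo();
    }
    else if( fs_type == FS_TYPE_RAMFS )
    {
        assert( 0 && "should not be called for RAMFS" );
    }
    else if( fs_type == FS_TYPE_DEVFS )
    {
        assert( 0 && "should not be called for DEVFS" );
    }
    else
    {
        assert( 0 && "undefined file system type" );
    }

    return error;
}

int main( void )
{
    printf( "fatfs dispatch returned %d\n", vfs_fs_foo( FS_TYPE_FATFS ) );
    return 0;
}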
3498 3527 3499 3528 // check arguments 3500 assert( (parent != NULL) , "parent pointer is NULL \n");3501 assert( (child_xp != XPTR_NULL) , "child pointer is NULL \n");3529 assert( (parent != NULL) , "parent pointer is NULL"); 3530 assert( (child_xp != XPTR_NULL) , "child pointer is NULL"); 3502 3531 3503 3532 // get parent inode FS type … … 3511 3540 else if( fs_type == FS_TYPE_RAMFS ) 3512 3541 { 3513 assert( false , "should not be called for RAMFS \n" );3542 assert( false , "should not be called for RAMFS" ); 3514 3543 } 3515 3544 else if( fs_type == FS_TYPE_DEVFS ) 3516 3545 { 3517 assert( false , "should not be called for DEVFS \n" );3546 assert( false , "should not be called for DEVFS" ); 3518 3547 } 3519 3548 else 3520 3549 { 3521 assert( false , "undefined file system type \n" );3550 assert( false , "undefined file system type" ); 3522 3551 } 3523 3552 … … 3534 3563 3535 3564 // check arguments 3536 assert( (inode != NULL) , "inode pointer is NULL \n");3537 assert( (dentry != NULL) , "dentry pointer is NULL \n");3565 assert( (inode != NULL) , "inode pointer is NULL"); 3566 assert( (dentry != NULL) , "dentry pointer is NULL"); 3538 3567 3539 3568 // get parent inode FS type … … 3547 3576 else if( fs_type == FS_TYPE_RAMFS ) 3548 3577 { 3549 assert( false , "should not be called for RAMFS \n" );3578 assert( false , "should not be called for RAMFS" ); 3550 3579 } 3551 3580 else if( fs_type == FS_TYPE_DEVFS ) 3552 3581 { 3553 assert( false , "should not be called for DEVFS \n" );3582 assert( false , "should not be called for DEVFS" ); 3554 3583 } 3555 3584 else 3556 3585 { 3557 assert( false , "undefined file system type \n" );3586 assert( false , "undefined file system type" ); 3558 3587 } 3559 3588 … … 3574 3603 3575 3604 // check arguments 3576 assert( (inode != NULL) , "parent pointer is NULL \n");3577 assert( (array != NULL) , "child pointer is NULL \n");3605 assert( (inode != NULL) , "parent pointer is NULL"); 3606 assert( (array != NULL) , "child pointer is NULL"); 3578 3607 assert( (detailed == false) , "detailed argument not supported\n"); 3579 3608 … … 3602 3631 else if( fs_type == FS_TYPE_RAMFS ) 3603 3632 { 3604 assert( false , "should not be called for RAMFS \n" );3633 assert( false , "should not be called for RAMFS" ); 3605 3634 } 3606 3635 else if( fs_type == FS_TYPE_DEVFS ) … … 3616 3645 else 3617 3646 { 3618 assert( false , "undefined file system type \n" );3647 assert( false , "undefined file system type" ); 3619 3648 } 3620 3649 … … 3629 3658 3630 3659 // check arguments 3631 assert( (inode != NULL) , "inode pointer is NULL \n");3660 assert( (inode != NULL) , "inode pointer is NULL"); 3632 3661 3633 3662 // get inode FS type … … 3641 3670 else if( fs_type == FS_TYPE_RAMFS ) 3642 3671 { 3643 assert( false , "should not be called for RAMFS \n" );3672 assert( false , "should not be called for RAMFS" ); 3644 3673 } 3645 3674 else if( fs_type == FS_TYPE_DEVFS ) 3646 3675 { 3647 assert( false , "should not be called for DEVFS \n" );3676 assert( false , "should not be called for DEVFS" ); 3648 3677 } 3649 3678 else 3650 3679 { 3651 assert( false , "undefined file system type \n" );3680 assert( false , "undefined file system type" ); 3652 3681 } 3653 3682 … … 3668 3697 else if( fs_type == FS_TYPE_RAMFS ) 3669 3698 { 3670 assert( false , "should not be called for RAMFS \n" );3699 assert( false , "should not be called for RAMFS" ); 3671 3700 } 3672 3701 else if( fs_type == FS_TYPE_DEVFS ) 3673 3702 { 3674 assert( false , "should not be called for DEVFS \n" );3703 assert( false , 
"should not be called for DEVFS" ); 3675 3704 } 3676 3705 else 3677 3706 { 3678 assert( false , "undefined file system type \n" );3707 assert( false , "undefined file system type" ); 3679 3708 } 3680 3709 … … 3695 3724 else if( fs_type == FS_TYPE_RAMFS ) 3696 3725 { 3697 assert( false , "should not be called for RAMFS \n" );3726 assert( false , "should not be called for RAMFS" ); 3698 3727 } 3699 3728 else if( fs_type == FS_TYPE_DEVFS ) 3700 3729 { 3701 assert( false , "should not be called for DEVFS \n" );3730 assert( false , "should not be called for DEVFS" ); 3702 3731 } 3703 3732 else 3704 3733 { 3705 assert( false , "undefined file system type \n" );3734 assert( false , "undefined file system type" ); 3706 3735 } 3707 3736 … … 3723 3752 else if( fs_type == FS_TYPE_RAMFS ) 3724 3753 { 3725 assert( false , "should not be called for RAMFS \n" );3754 assert( false , "should not be called for RAMFS" ); 3726 3755 } 3727 3756 else if( fs_type == FS_TYPE_DEVFS ) 3728 3757 { 3729 assert( false , "should not be called for DEVFS \n" );3758 assert( false , "should not be called for DEVFS" ); 3730 3759 } 3731 3760 else 3732 3761 { 3733 assert( false , "undefined file system type \n" );3762 assert( false , "undefined file system type" ); 3734 3763 } 3735 3764 … … 3743 3772 error_t error = 0; 3744 3773 3745 assert( (inode_xp != XPTR_NULL) , "inode pointer is NULL \n")3774 assert( (inode_xp != XPTR_NULL) , "inode pointer is NULL") 3746 3775 3747 3776 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); … … 3751 3780 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3752 3781 3753 assert( (mapper != NULL) , "mapper pointer is NULL \n")3782 assert( (mapper != NULL) , "mapper pointer is NULL") 3754 3783 3755 3784 // get FS type from mapper … … 3763 3792 else if( fs_type == FS_TYPE_RAMFS ) 3764 3793 { 3765 assert( false , "should not be called for RAMFS \n" );3794 assert( false , "should not be called for RAMFS" ); 3766 3795 } 3767 3796 else if( fs_type == FS_TYPE_DEVFS ) 3768 3797 { 3769 assert( false , "should not be called for DEVFS \n" );3798 assert( false , "should not be called for DEVFS" ); 3770 3799 } 3771 3800 else 3772 3801 { 3773 assert( false , "undefined file system type \n" );3802 assert( false , "undefined file system type" ); 3774 3803 } 3775 3804 -
trunk/kernel/fs/vfs.h
r623 r625 593 593 * This function is called by the vfs_lookup() function when a new dentry/inode must
594 594 * be created from scratch and introduced in both the Inode Tree and the IOC device.
595 * The dentry and inode descriptors have been created by the caller :595 * The dentry and inode descriptors have been created by the caller.
596 596 * - It allocates one cluster from the relevant FS, and updates the File Allocation
597 597 * Table (both the FAT mapper, and the IOC device). … … 966 966 * the <inode> argument, to find a directory entry identified by the <dentry> argument,
967 967 * and update the size for this directory entry in mapper, as defined by <size>.
968 * The searched "name" is defined in the <dentry> argument, that must be in the same
969 * cluster as the parent inode.It is called by the vfs_close() function. 968 * The parent directory on device is synchronously updated.
969 * It is called by the vfs_close() function.
970 970 *
971 971 * Depending on the file system type, it calls the relevant FS-specific function. -
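Both headers rely on the generic void* "extend" fields to carry FS-specific data: the FATFS stores its context pointer in vfs_ctx_t, the first FAT cluster index in the inode, and the 32-byte directory-entry index in the dentry, using plain integer-to-pointer casts as in fatfs_new_dentry(). A standalone model of that packing idiom (the dentry_model_t type is the model's own):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* model of a VFS object carrying an FS-private field */
typedef struct { void * extend; } dentry_model_t;

int main( void )
{
    dentry_model_t dentry;
    uint32_t       index = 37;     /* e.g. a directory-entry index in the mapper */

    /* store an integer in the void* field, as fatfs.c does:
     * dentry_ptr->extend = (void *)(intptr_t)index;              */
    dentry.extend = (void *)(intptr_t)index;

    /* read it back with the symmetric double cast */
    uint32_t out = (uint32_t)(intptr_t)dentry.extend;

    assert( out == index );
    printf( "index = %u\n", (unsigned)out );
    return 0;
}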
trunk/kernel/kern/chdev.c
r619 r625 138 138 uint32_t server_lid; // core running the server thread local index 139 139 xptr_t lock_xp; // extended pointer on lock protecting the chdev state 140 uint32_t save_sr; // for critical section141 140 142 141 #if (DEBUG_SYS_READ & 1) … … 177 176 uint32_t rx_cycle = (uint32_t)hal_get_cycles(); 178 177 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) ) 179 printk("\n[%s] client [%x,%x] enter for RX / server[%x,%x] / cycle %d\n",178 printk("\n[%s] client thread[%x,%x] enter for RX / server[%x,%x] / cycle %d\n", 180 179 __FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, rx_cycle ); 181 180 #endif … … 184 183 uint32_t tx_cycle = (uint32_t)hal_get_cycles(); 185 184 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) ) 186 printk("\n[%s] client [%x,%x] enter for TX / server[%x,%x] / cycle %d\n",185 printk("\n[%s] client thread[%x,%x] enter for TX / server[%x,%x] / cycle %d\n", 187 186 __FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, tx_cycle ); 188 187 #endif … … 194 193 xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root ); 195 194 196 // build extended pointer on server thread blocked state197 xptr_t blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );198 199 195 // build extended pointer on lock protecting chdev waiting queue 200 196 lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock ); 201 197 202 // TODO the hal_disable_irq() / hal_restore_irq() 203 // in the sequence below is probably useless, as it is 204 // already done by the busylock_acquire() / busylock_release() 205 // => remove it [AG] october 2018 206 207 // critical section for the following sequence: 198 // The following actions execute in critical section, 199 // because the lock_acquire / lock_release : 208 200 // (1) take the lock protecting the chdev state 209 // (2) block the client thread 210 // (3) unblock the server thread if required 211 // (4) register client thread in server queue 212 // (5) send IPI to force server scheduling 213 // (6) release the lock protecting waiting queue 214 // (7) deschedule 215 216 // enter critical section 217 hal_disable_irq( &save_sr ); 218 219 // take the lock protecting chdev queue 201 // (2) register client thread in server queue 202 // (3) unblock the server thread and block client thread 203 // (4) send IPI to force server scheduling 204 // (5) release the lock protecting waiting queue 205 206 // 1. 
take the lock protecting chdev queue 220 207 remote_busylock_acquire( lock_xp ); 221 208 222 // block current thread 223 thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO ); 224 225 #if (DEBUG_CHDEV_CMD_TX & 1) 226 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) ) 227 printk("\n[%s] client thread[%x,%x] blocked\n", 228 __FUNCTION__, this->process->pid, this->trdid ); 229 #endif 230 231 #if (DEBUG_CHDEV_CMD_RX & 1) 232 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) ) 233 printk("\n[%s] client thread[%x,%x] blocked\n", 234 __FUNCTION__, this->process_pid, this->trdid ); 235 #endif 236 237 // unblock server thread if required 238 if( hal_remote_l32( blocked_xp ) & THREAD_BLOCKED_IDLE ) 239 thread_unblock( server_xp , THREAD_BLOCKED_IDLE ); 240 241 #if (DEBUG_CHDEV_CMD_TX & 1) 242 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) ) 243 printk("\n[%s] TX server thread[%x,%x] unblocked\n", 244 __FUNCTION__, server_pid, server_trdid ); 245 #endif 246 247 #if (DEBUG_CHDEV_CMD_RX & 1) 248 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) ) 249 printk("\n[%s] RX server thread[%x,%x] unblocked\n", 250 __FUNCTION__, server_pid, server_trdid ); 251 #endif 252 253 // register client thread in waiting queue 209 // 2. register client thread in waiting queue 254 210 xlist_add_last( root_xp , list_xp ); 255 211 … … 266 222 #endif 267 223 268 // send IPI to core running the server thread when server core != client core 224 // 3. client thread unblocks server thread and blocks itself 225 thread_unblock( server_xp , THREAD_BLOCKED_IDLE ); 226 thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO ); 227 228 #if (DEBUG_CHDEV_CMD_TX & 1) 229 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) ) 230 printk("\n[%s] client thread[%x,%x] unblock server thread[%x,%x] and block itself\n", 231 __FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid ); 232 #endif 233 234 #if (DEBUG_CHDEV_CMD_RX & 1) 235 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) ) 236 printk("\n[%s] client thread[%x,%x] unblock server thread[%x,%x] and block itself\n", 237 __FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid ); 238 #endif 239 240 // 4. send IPI to core running the server thread when server core != client core 269 241 if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) ) 270 242 { … … 285 257 } 286 258 287 // release lock protecting chdev queue259 // 5.
release lock protecting chdev queue 288 260 remote_busylock_release( lock_xp ); 289 261 290 262 // deschedule 291 263 sched_yield("blocked on I/O"); 292 293 // exit critical section294 hal_restore_irq( save_sr );295 264 296 265 #if DEBUG_CHDEV_CMD_RX 297 266 rx_cycle = (uint32_t)hal_get_cycles(); 298 267 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) ) 299 printk("\n[%s] client _thread[%x,%x] exit for RX / cycle %d\n",268 printk("\n[%s] client thread[%x,%x] exit for RX / cycle %d\n", 300 269 __FUNCTION__, this->process->pid, this->trdid, rx_cycle ); 301 270 #endif … … 304 273 tx_cycle = (uint32_t)hal_get_cycles(); 305 274 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) ) 306 printk("\n[%s] client _thread[%x,%x] exit for TX / cycle %d\n",275 printk("\n[%s] client thread[%x,%x] exit for TX / cycle %d\n", 307 276 __FUNCTION__, this->process->pid, this->trdid, tx_cycle ); 308 277 #endif … … 344 313 uint32_t rx_cycle = (uint32_t)hal_get_cycles(); 345 314 if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) ) 346 printk("\n[%s] DEVthread[%x,%x] check TXT_RX channel %d / cycle %d\n",315 printk("\n[%s] server thread[%x,%x] check TXT_RX channel %d / cycle %d\n", 347 316 __FUNCTION__ , server->process->pid, server->trdid, chdev->channel, rx_cycle ); 348 317 #endif … … 370 339 rx_cycle = (uint32_t)hal_get_cycles(); 371 340 if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) ) 372 printk("\n[%s] thread[%x,%x] found RX queue empty => blocks / cycle %d\n",341 printk("\n[%s] server thread[%x,%x] found RX queue empty => blocks / cycle %d\n", 373 342 __FUNCTION__ , server->process->pid, server->trdid, rx_cycle ); 374 343 #endif … … 377 346 tx_cycle = (uint32_t)hal_get_cycles(); 378 347 if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) ) 379 printk("\n[%s] thread[%x,%x] found TX queue empty => blocks / cycle %d\n",348 printk("\n[%s] server thread[%x,%x] found TX queue empty => blocks / cycle %d\n", 380 349 __FUNCTION__ , server->process->pid, server->trdid, tx_cycle ); 381 350 #endif … … 407 376 rx_cycle = (uint32_t)hal_get_cycles(); 408 377 if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) ) 409 printk("\n[%s] thread[%x,%x] for RX getclient thread[%x,%x] / cycle %d\n",378 printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n", 410 379 __FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle ); 411 380 #endif … … 414 383 tx_cycle = (uint32_t)hal_get_cycles(); 415 384 if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) ) 416 printk("\n[%s] thread[%x,%x] for TX getclient thread[%x,%x] / cycle %d\n",385 printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n", 417 386 __FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle ); 418 387 #endif … … 445 414 rx_cycle = (uint32_t)hal_get_cycles(); 446 415 if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) ) 447 printk("\n[%s] thread[%x,%x] completes RXfor client thread[%x,%x] / cycle %d\n",416 printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n", 448 417 __FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle ); 449 418 #endif … … 452 421 tx_cycle = (uint32_t)hal_get_cycles(); 453 422 if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) ) 454 printk("\n[%s] thread[%x,%x] completes TXfor client thread[%x,%x] / cycle %d\n",423 printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n", 455 424 
__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle ); 456 425 #endif -
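The rewritten chdev_register_command() sequence is easier to read in isolation. Below is a condensed sketch of the five numbered steps, with debug instrumentation and the descheduling tail included; the dev_pic_send_ipi() call name is taken from the generic PIC device API and is not visible in this hunk, so treat it as an assumption:

    // 1. take the lock protecting the chdev waiting queue
    remote_busylock_acquire( lock_xp );

    // 2. register the client thread in the waiting queue
    xlist_add_last( root_xp , list_xp );

    // 3. unblock the server thread, then block the client thread
    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );

    // 4. send an IPI when the server runs on another core or cluster
    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    {
        dev_pic_send_ipi( chdev_cxy , server_lid );   // assumed API name
    }

    // 5. release the lock, then actually deschedule
    remote_busylock_release( lock_xp );
    sched_yield( "blocked on I/O" );

Note that the client blocks itself before releasing the busylock but only deschedules after the release, so the server can safely dequeue the command as soon as the lock is free.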
trunk/kernel/kern/chdev.h
r619 r625 111 111 /****************************************************************************************** 112 112 * This structure defines a chdev descriptor. 113 * For multi-channels device, there is one chdev descriptor per channel.114 113 * This structure is NOT replicated, and can be located in any cluster. 115 114 * One kernel thread, in charge of handling the commands registered in the waiting queue -
trunk/kernel/kern/printk.c
r623 r625 253 253 break; 254 254 } 255 case ('b'): / / excactly 2 digits hexadecimal255 case ('b'): /* exactly 2 digits hexadecimal */ 256 256 { 257 257 int val = va_arg( *args, int ); … … 426 426 427 427 // print generic infos 428 nolock_printk("\n [PANIC] in %s: line %d | cycle %d\n"428 nolock_printk("\n\n[PANIC] in %s: line %d | cycle %d\n" 429 429 "core[%x,%d] | thread %x (%x) | process %x (%x)\n", 430 430 function_name, line, (uint32_t)cycle, … … 502 502 remote_busylock_acquire( lock_xp ); 503 503 504 // display stringon TTY0504 // display buf on TTY0 505 505 dev_txt_sync_write( buf , 10 ); 506 507 // release TXT0 lock 508 remote_busylock_release( lock_xp ); 509 } 510 511 //////////////////////// 512 void putd( int32_t val ) 513 { 514 static const char HexaTab[] = "0123456789ABCDEF"; 515 516 char buf[10]; 517 uint32_t i; 518 519 // get pointers on TXT0 chdev 520 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 521 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 522 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 523 524 // get extended pointer on remote TXT0 chdev lock 525 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 526 527 // get TXT0 lock 528 remote_busylock_acquire( lock_xp ); 529 530 if (val < 0) 531 { 532 val = -val; 533 dev_txt_sync_write( "-" , 1 ); 534 } 535 536 for(i = 0; i < 10 ; i++) 537 { 538 buf[9 - i] = HexaTab[val % 10]; 539 if (!(val /= 10)) break; 540 } 541 542 // display buf on TTY0 543 dev_txt_sync_write( &buf[9-i] , i+1 ); 506 544 507 545 // release TXT0 lock -
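The new putd() follows the putx() pattern, with base-10 conversion and a sign prefix. A short usage sketch follows, assuming the non-formatted string helper declared in printk.h is puts() (its name is elided in the hunks above):

    // typical early-debug usage: each helper takes the TXT0 lock itself
    puts( "refcount = " );    // non-formatted string (assumed helper name)
    putd( -42 );              // signed decimal : displays "-42"
    puts( "\n" );

    // caveat (not handled by the code above): for val == INT32_MIN the
    // statement "val = -val;" overflows, since +2^31 does not fit in an
    // int32_t; callers should avoid passing INT32_MIN.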
trunk/kernel/kern/printk.h
r623 r625 123 123 124 124 /********************************************************************************** 125 * This function displays a non-formatted message on kernelTXT0 terminal.125 * This function displays a non-formatted message on TXT0 terminal. 126 126 * This function is actually used to debug the assembly level kernel functions. 127 127 ********************************************************************************** … … 131 131 132 132 /********************************************************************************** 133 * This function displays a 32 bits value in hexadecimal on kernelTXT0 terminal.133 * This function displays a 32 bits value in hexadecimal on TXT0 terminal. 134 134 * This function is actually used to debug the assembly level kernel functions. 135 135 ********************************************************************************** … … 139 139 140 140 /********************************************************************************** 141 * This function displays a 64 bits value in hexadecimal on kernel TXT0 terminal. 141 * This function displays a 32 bits signed value in decimal on TXT0 terminal. 142 * This function is actually used to debug the assembly level kernel functions. 143 ********************************************************************************** 144 * @ val : 32 bits signed value. 145 *********************************************************************************/ 146 void putd( int32_t val ); 147 148 /********************************************************************************** 149 * This function displays a 64 bits value in hexadecimal on TXT0 terminal. 142 150 * This function is actually used to debug the assembly level kernel functions. 143 151 ********************************************************************************** … … 147 155 148 156 /********************************************************************************** 149 * This debug function displays on the kernelTXT0 terminal the content of an157 * This debug function displays on the TXT0 terminal the content of an 150 158 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line). 151 159 * The <string> argument is displayed before the buffer content. -
trunk/kernel/kern/process.c
r624 r625 91 91 } 92 92 93 ///////////////////////////////////////////////// 94 void process_reference_init( process_t * process, 95 pid_t pid, 96 xptr_t parent_xp ) 97 { 93 //////////////////////////////////////////////////// 94 error_t process_reference_init( process_t * process, 95 pid_t pid, 96 xptr_t parent_xp ) 97 { 98 error_t error; 98 99 xptr_t process_xp; 99 100 cxy_t parent_cxy; … … 105 106 uint32_t stdout_id; 106 107 uint32_t stderr_id; 107 error_t error;108 108 uint32_t txt_id; 109 109 char rx_path[40]; … … 111 111 xptr_t file_xp; 112 112 xptr_t chdev_xp; 113 chdev_t *chdev_ptr;113 chdev_t * chdev_ptr; 114 114 cxy_t chdev_cxy; 115 115 pid_t parent_pid; 116 vmm_t * vmm; 116 117 117 118 // build extended pointer on this reference process 118 119 process_xp = XPTR( local_cxy , process ); 120 121 // get pointer on process vmm 122 vmm = &process->vmm; 119 123 120 124 // get parent process cluster and local pointer … … 129 133 uint32_t cycle = (uint32_t)hal_get_cycles(); 130 134 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 131 printk("\n[%s] thread[%x,%x] enter to init alialize process %x / cycle %d\n",132 __FUNCTION__, parent_pid, this->trdid, pid, cycle );135 printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n", 136 __FUNCTION__, this->process->pid, this->trdid, pid, cycle ); 133 137 #endif 134 138 … … 144 148 process->cwd_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) ); 145 149 146 // initialize vmm as empty 147 error = vmm_init( process ); 148 149 assert( (error == 0) , "cannot initialize VMM\n" ); 150 // initialize VSL as empty 151 vmm->vsegs_nr = 0; 152 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); 153 154 // create an empty GPT as required by the architecture 155 error = hal_gpt_create( &vmm->gpt ); 156 if( error ) 157 { 158 printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ ); 159 return -1; 160 } 161 162 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 163 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 164 printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n", 165 __FUNCTION__, parent_pid, this->trdid, pid ); 166 #endif 167 168 // initialize GPT and VSL locks 169 remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); 170 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 171 172 // register kernel vsegs in VMM as required by the architecture 173 error = hal_vmm_kernel_update( process ); 174 if( error ) 175 { 176 printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ ); 177 return -1; 178 } 179 180 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 181 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 182 printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n", 183 __FUNCTION__, parent_pid, this->trdid, pid ); 184 #endif 185 186 // create "args" and "envs" vsegs 187 // create "stacks" and "mmap" vsegs allocators 188 // initialize locks protecting GPT and VSL 189 error = vmm_user_init( process ); 190 if( error ) 191 { 192 printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ ); 193 return -1; 194 } 150 195 151 196 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 152 197 cycle = (uint32_t)hal_get_cycles(); 153 198 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 154 printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n",155 __FUNCTION__, parent_pid, this->trdid, pid , cycle);199 printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 200 __FUNCTION__, parent_pid, this->trdid, pid ); 156 201 #endif 157 202 … … 187 232 &stdin_xp, 188 
233 &stdin_id ); 189 190 assert( (error == 0) , "cannot open stdin pseudo file" ); 234 if( error ) 235 { 236 printk("\n[ERROR] in %s : cannot open stdin pseudo-file\n", __FUNCTION__ ); 237 return -1; 238 } 239 191 240 assert( (stdin_id == 0) , "stdin index must be 0" ); 192 241 193 242 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 206 255 &stdout_xp, 207 256 &stdout_id ); 208 209 assert( (error == 0) , "cannot open stdout pseudo file" ); 210 assert( (stdout_id == 1) , "stdout index must be 1" ); 257 if( error ) 258 { 259 printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ ); 260 return -1; 261 } 262 263 assert( (stdout_id == 1) , "stdout index must be 1" ); 211 264 212 265 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 225 278 &stderr_xp, 226 279 &stderr_id ); 227 228 assert( (error == 0) , "cannot open stderr pseudo file" ); 229 assert( (stderr_id == 2) , "stderr index must be 2" ); 280 if( error ) 281 { 282 printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ ); 283 return -1; 284 } 285 286 assert( (stderr_id == 2) , "stderr index must be 2" ); 230 287 231 288 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 240 297 { 241 298 // get extended pointer on stdin pseudo file in parent process 242 file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) ); 299 file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy, 300 &parent_ptr->fd_array.array[0] ) ); 243 301 244 302 // get extended pointer on parent process TXT chdev … … 261 319 262 320 // initialize lock protecting CWD changes 263 remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD ); 321 remote_busylock_init( XPTR( local_cxy , 322 &process->cwd_lock ), LOCK_PROCESS_CWD ); 264 323 265 324 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) … … 273 332 xlist_root_init( XPTR( local_cxy , &process->children_root ) ); 274 333 process->children_nr = 0; 275 remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN ); 334 remote_queuelock_init( XPTR( local_cxy, 335 &process->children_lock ), LOCK_PROCESS_CHILDREN ); 276 336 277 337 // reset semaphore / mutex / barrier / condvar list roots and lock … … 280 340 xlist_root_init( XPTR( local_cxy , &process->barrier_root ) ); 281 341 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 282 remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC ); 342 remote_queuelock_init( XPTR( local_cxy , 343 &process->sync_lock ), LOCK_PROCESS_USERSYNC ); 283 344 284 345 // reset open directories root and lock 285 346 xlist_root_init( XPTR( local_cxy , &process->dir_root ) ); 286 remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR ); 347 remote_queuelock_init( XPTR( local_cxy , 348 &process->dir_lock ), LOCK_PROCESS_DIR ); 287 349 288 350 // register new process in the local cluster manager pref_tbl[] … … 315 377 #endif 316 378 379 return 0; 380 317 381 } // process_reference_init() 318 382 … … 321 385 xptr_t reference_process_xp ) 322 386 { 323 error_t error; 387 error_t error; 388 vmm_t * vmm; 324 389 325 390 // get reference process cluster and local pointer 326 391 cxy_t ref_cxy = GET_CXY( reference_process_xp ); 327 392 process_t * ref_ptr = GET_PTR( reference_process_xp ); 393 394 // get pointer on process vmm 395 vmm = &local_process->vmm; 328 396 329 397 // initialize PID, REF_XP, PARENT_XP, and STATE … … 343 411 344 412 // check user process 345 assert( (local_process->pid != 0), "PID cannot be 0" ); 346 347 // reset local process vmm 348 error = vmm_init( local_process ); 349
assert( (error == 0) , "cannot initialize VMM\n"); 350 351 // reset process file descriptors array 413 assert( (local_process->pid != 0), "LPID cannot be 0" ); 414 415 // initialize VSL as empty 416 vmm->vsegs_nr = 0; 417 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); 418 419 // create an empty GPT as required by the architecture 420 error = hal_gpt_create( &vmm->gpt ); 421 if( error ) 422 { 423 printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ ); 424 return -1; 425 } 426 427 // initialize GPT and VSL locks 428 remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); 429 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 430 431 // register kernel vsegs in VMM as required by the architecture 432 error = hal_vmm_kernel_update( local_process ); 433 if( error ) 434 { 435 printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ ); 436 return -1; 437 } 438 439 // create "args" and "envs" vsegs 440 // create "stacks" and "mmap" vsegs allocators 441 // initialize locks protecting GPT and VSL 442 error = vmm_user_init( local_process ); 443 if( error ) 444 { 445 printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ ); 446 return -1; 447 } 448 449 #if (DEBUG_PROCESS_COPY_INIT & 1) 450 cycle = (uint32_t)hal_get_cycles(); 451 if( DEBUG_PROCESS_COPY_INIT < cycle ) 452 printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 453 __FUNCTION__, parent_pid, this->trdid, pid, cycle ); 454 #endif 455 456 // set process file descriptors array 352 457 process_fd_init( local_process ); 353 458 354 // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields459 // set vfs_root_xp / vfs_bin_xp / cwd_xp fields 355 460 local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) ); 356 461 local_process->vfs_bin_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) ); … … 380 485 local_process->th_nr = 0; 381 486 rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL ); 382 383 487 384 488 // register new process descriptor in local cluster manager local_list … … 451 555 #endif 452 556 453 // remove process from children_list 454 // and release PID if owner cluster 557 // when target process cluster is the owner cluster 558 // - remove process from TXT list and transfer ownership 559 // - remove process from children_list 560 // - release PID 455 561 if( CXY_FROM_PID( pid ) == local_cxy ) 456 562 { 563 process_txt_detach( XPTR( local_cxy , process ) ); 564 565 #if (DEBUG_PROCESS_DESTROY & 1) 566 if( DEBUG_PROCESS_DESTROY < cycle ) 567 printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n", 568 __FUNCTION__, this->process->pid, this->trdid, pid ); 569 #endif 570 457 571 // get pointers on parent process 458 572 parent_xp = process->parent_xp; … … 472 586 #if (DEBUG_PROCESS_DESTROY & 1) 473 587 if( DEBUG_PROCESS_DESTROY < cycle ) 474 printk("\n[%s] thread[%x,%x] removed process %x in cluster %x fromchildren list\n",475 __FUNCTION__, this->process->pid, this->trdid, pid , local_cxy);588 printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n", 589 __FUNCTION__, this->process->pid, this->trdid, pid ); 476 590 #endif 477 591 … … 777 891 uint32_t cycle = (uint32_t)hal_get_cycles(); 778 892 if( DEBUG_PROCESS_SIGACTION < cycle ) 779 printk("\n[%s] thread[%x,%x] enter in cluster %x for process%x / cycle %d\n",780 __FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );893 printk("\n[%s] thread[%x,%x] enter for 
process %x in cluster %x / cycle %d\n", 894 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); 781 895 #endif … … 1189 1303 } // end process_register_thread() 1190 1304 1191 ///////////////////////////////////////////////// 1192 bool_t process_remove_thread( thread_t * thread )1305 /////////////////////////////////////////////////// 1306 uint32_t process_remove_thread( thread_t * thread ) 1193 1307 { 1194 1308 uint32_t count; // number of threads in local process descriptor 1309 1310 // check thread 1311 assert( (thread != NULL) , "thread argument is NULL" ); 1195 1312 1196 1313 process_t * process = thread->process; … … 1205 1322 count = process->th_nr; 1206 1323 1207 // check thread1208 assert( (thread != NULL) , "thread argument is NULL" );1209 1210 1324 // check th_nr value 1211 1325 assert( (count > 0) , "process th_nr cannot be 0" ); … … 1218 1332 rwlock_wr_release( &process->th_lock ); 1219 1333 1220 return (count == 1);1334 return count; 1221 1335 1222 1336 } // end process_remove_thread() … … 1283 1397 cycle = (uint32_t)hal_get_cycles(); 1284 1398 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1285 printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",1399 printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n", 1286 1400 __FUNCTION__, pid, trdid, new_pid, cycle ); 1287 1401 #endif 1288 1402 1289 1403 // initializes child process descriptor from parent process descriptor 1290 process_reference_init( process, 1291 new_pid, 1292 parent_process_xp ); 1404 error = process_reference_init( process, 1405 new_pid, 1406 parent_process_xp ); 1407 if( error ) 1408 { 1409 printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 1410 __FUNCTION__, local_cxy ); 1411 process_free( process ); 1412 return -1; 1413 } 1293 1414 1294 1415 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) … … 1298 1419 __FUNCTION__, pid, trdid, new_pid, cycle ); 1299 1420 #endif 1300 1301 1421 1302 1422 // copy VMM from parent descriptor to child descriptor … … 1361 1481 #endif 1362 1482 1363 // set C opy_On_Write flag in parent process GPT1364 // this includes all replicated GPT copies1483 // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM 1484 // this includes all parent process copies in all clusters 1365 1485 if( parent_process_cxy == local_cxy ) // reference is local 1366 1486 { … … 1373 1493 } 1374 1494 1375 // set C opy_On_Write flag in child process GPT1495 // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM 1376 1496 vmm_set_cow( process ); 1377 1497 … … 1423 1543 char ** args_pointers; // array of pointers on main thread arguments 1424 1544 1425 // get thread, process, pid and ref_xp1545 // get calling thread, process, pid and ref_xp 1426 1546 thread = CURRENT_THREAD; 1427 1547 process = thread->process; … … 1470 1590 cycle = (uint32_t)hal_get_cycles(); 1471 1591 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1472 printk("\n[%s] thread[%x,%x] deleted allthreads / cycle %d\n",1592 printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n", 1473 1593 __FUNCTION__, pid, thread->trdid, cycle ); 1474 1594 #endif 1475 1595 1476 // reset localprocess VMM1477 vmm_ destroy( process );1596 // reset calling process VMM 1597 vmm_user_reset( process ); 1478 1598 1479 1599 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 1480 1600 cycle = (uint32_t)hal_get_cycles(); 1481 1601 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1482 printk("\n[%s] thread[%x,%x] reset VMM/ cycle %d\n",1602 printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n", 1603
__FUNCTION__, pid, thread->trdid, cycle ); 1484 1604 #endif 1485 1605 1486 // re-initialize the VMM ( kentry/args/envs vsegs registration)1487 error = vmm_ init( process );1606 // re-initialize the VMM (args/envs vsegs registration) 1607 error = vmm_user_init( process ); 1488 1608 if( error ) 1489 1609 { … … 1497 1617 cycle = (uint32_t)hal_get_cycles(); 1498 1618 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1499 printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered/ cycle %d\n",1619 printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n", 1500 1620 __FUNCTION__, pid, thread->trdid, cycle ); 1501 1621 #endif … … 1515 1635 cycle = (uint32_t)hal_get_cycles(); 1516 1636 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 1517 printk("\n[%s] thread[%x,%x] / code/data vsegs registered/ cycle %d\n",1637 printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n", 1518 1638 __FUNCTION__, pid, thread->trdid, cycle ); 1519 1639 #endif … … 1577 1697 vmm->vsegs_nr = 0; 1578 1698 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); 1579 remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );1580 1699 1581 1700 // initialise GPT as empty 1582 1701 error = hal_gpt_create( &vmm->gpt ); 1583 1584 1702 if( error ) 1585 1703 { … … 1588 1706 } 1589 1707 1590 // initialize GPT lock 1708 // initialize VSL and GPT locks 1709 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 1591 1710 remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); 1592 1711 1593 1712 // create kernel vsegs in GPT and VSL, as required by the hardware architecture 1594 1713 error = hal_vmm_kernel_init( info ); 1595 1596 1714 if( error ) 1597 1715 { … … 1652 1770 // allocates memory for process descriptor from local cluster 1653 1771 process = process_alloc(); 1654 1655 // check memory allocator 1656 assert( (process != NULL), 1657 "no memory for process descriptor in cluster %x", local_cxy ); 1772 if( process == NULL ) 1773 { 1774 printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ ); 1775 hal_core_sleep(); 1776 } 1658 1777 1659 1778 // set the CWD and VFS_ROOT fields in process descriptor … … 1663 1782 // get PID from local cluster 1664 1783 error = cluster_pid_alloc( process , &pid ); 1665 1666 // check PID allocator 1667 assert( (error == 0), 1668 "cannot allocate PID in cluster %x", local_cxy ); 1669 1670 // check PID value 1671 assert( (pid == 1) , 1672 "process INIT must be first process in cluster 0" ); 1784 if( error ) 1785 { 1786 printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ ); 1787 hal_core_sleep(); 1788 } 1789 if( pid != 1 ) 1790 { 1791 printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ ); 1792 hal_core_sleep(); 1793 } 1673 1794 1674 1795 // initialize process descriptor / parent is local process_zero 1675 process_reference_init( process, 1676 pid, 1677 XPTR( local_cxy , &process_zero ) ); 1796 error = process_reference_init( process, 1797 pid, 1798 XPTR( local_cxy , &process_zero ) ); 1799 if( error ) 1800 { 1801 printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ ); 1802 hal_core_sleep(); 1803 } 1678 1804 1679 1805 #if(DEBUG_PROCESS_INIT_CREATE & 1) … … 1693 1819 &file_xp, 1694 1820 &file_id ); 1695 1696 assert( (error == 0), 1697 "failed to open file <%s>", CONFIG_PROCESS_INIT_PATH ); 1821 if( error ) 1822 { 1823 printk("\n[PANIC] in %s : cannot open file <%s>\n", 1824 __FUNCTION__, CONFIG_PROCESS_INIT_PATH ); 1825 hal_core_sleep(); 1826 } 1698 1827 1699 1828 
#if(DEBUG_PROCESS_INIT_CREATE & 1) … … 1703 1832 #endif 1704 1833 1705 // register "code" and "data" vsegs as well as entry-point1834 // register "code" and "data" vsegs as well as entry-point 1706 1835 // in process VMM, using information contained in the elf file. 1707 1836 error = elf_load_process( file_xp , process ); 1708 1837 1709 assert( (error == 0), 1710 "cannot access .elf file <%s>", CONFIG_PROCESS_INIT_PATH ); 1838 if( error ) 1839 { 1840 printk("\n[PANIC] in %s : cannot access file <%s>\n", 1841 __FUNCTION__, CONFIG_PROCESS_INIT_PATH ); 1842 hal_core_sleep(); 1843 } 1844 1711 1845 1712 1846 #if(DEBUG_PROCESS_INIT_CREATE & 1) … … 1714 1848 printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n", 1715 1849 __FUNCTION__, this->process->pid, this->trdid ); 1850 #endif 1851 1852 #if (DEBUG_PROCESS_INIT_CREATE & 1) 1853 hal_vmm_display( process , true ); 1716 1854 #endif 1717 1855 … … 1751 1889 &thread ); 1752 1890 1753 assert( (error == 0), 1754 "cannot create main thread for <%s>", CONFIG_PROCESS_INIT_PATH ); 1755 1756 assert( (thread->trdid == 0), 1757 "main thread must have index 0 for <%s>", CONFIG_PROCESS_INIT_PATH ); 1891 if( error ) 1892 { 1893 printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__ ); 1894 hal_core_sleep(); 1895 } 1896 if( thread->trdid != 0 ) 1897 { 1898 printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__ ); 1899 hal_core_sleep(); 1900 } 1758 1901 1759 1902 #if(DEBUG_PROCESS_INIT_CREATE & 1) … … 1989 2132 process_txt_transfer_ownership( process_xp ); 1990 2133 1991 // get extended pointer on process stdin file2134 // get extended pointer on process stdin pseudo file 1992 2135 file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) ); 1993 2136 … … 2014 2157 uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) ); 2015 2158 if( DEBUG_PROCESS_TXT < cycle ) 2016 printk("\n[%s] thread[%x,%x] detached process %x from TXT 2159 printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n", 2017 2160 __FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle ); 2018 2161 #endif … … 2056 2199 uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) ); 2057 2200 if( DEBUG_PROCESS_TXT < cycle ) 2058 printk("\n[%s] thread[%x,%x] give TXT %dto process %x / cycle %d\n",2201 printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n", 2059 2202 __FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle ); 2060 2203 #endif … … 2078 2221 xptr_t iter_xp; // iterator for xlist 2079 2222 xptr_t current_xp; // extended pointer on current process 2080 process_t * current_ptr; // local pointer on current process 2081 cxy_t current_cxy; // cluster for current process 2223 bool_t found; 2082 2224 2083 2225 #if DEBUG_PROCESS_TXT … … 2086 2228 #endif 2087 2229 2088 // get pointers on process in owner cluster2230 // get pointers on target process 2089 2231 process_cxy = GET_CXY( process_xp ); 2090 2232 process_ptr = GET_PTR( process_xp ); 2091 2233 process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ); 2092 2234 2093 2094 2095 2235 // check owner cluster 2236 assert( (process_cxy == CXY_FROM_PID( process_pid )) , 2237 "process descriptor not in owner cluster" ); 2096 2238 2097 2239 // get extended pointer on stdin pseudo file … … 2103 2245 txt_ptr = GET_PTR( txt_xp ); 2104 2246 2105 // get extended pointer on TXT_RX owner and TXT channel2247 // get relevant infos from chdev descriptor 2106 2248 owner_xp = 
hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) ); 2107 txt_id = hal_remote_l32 2108 2109 // transfer ownership only if process is the TXT owner2249 txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) ); 2250 2251 // transfer ownership only if target process is the TXT owner 2110 2252 if( (owner_xp == process_xp) && (txt_id > 0) ) 2111 2253 { … … 2114 2256 lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock ); 2115 2257 2116 // get lock 2117 remote_busylock_acquire( lock_xp ); 2118 2119 if( process_get_ppid( process_xp ) != 1 ) // process is not KSH 2258 if( process_get_ppid( process_xp ) != 1 ) // target process is not KSH 2120 2259 { 2260 // get lock 2261 remote_busylock_acquire( lock_xp ); 2262 2121 2263 // scan attached process list to find KSH process 2122 XLIST_FOREACH( root_xp , iter_xp ) 2264 found = false; 2265 for( iter_xp = hal_remote_l64( root_xp ) ; 2266 (iter_xp != root_xp) && (found == false) ; 2267 iter_xp = hal_remote_l64( iter_xp ) ) 2268 { 2269 current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list ); 2270 2271 if( process_get_ppid( current_xp ) == 1 ) // current is KSH 2272 { 2273 // set owner field in TXT chdev 2274 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 2275 2276 #if DEBUG_PROCESS_TXT 2277 cycle = (uint32_t)hal_get_cycles(); 2278 if( DEBUG_PROCESS_TXT < cycle ) 2279 printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n", 2280 __FUNCTION__, this->process->pid, this->trdid, txt_id, cycle ); 2281 #endif 2282 found = true; 2283 } 2284 } 2285 2286 // release lock 2287 remote_busylock_release( lock_xp ); 2288 2289 // A KSH process must exist for each user TXT channel 2290 assert( (found == true), "KSH process not found for TXT%d", txt_id ); 2291 2292 } 2293 else // target process is KSH 2294 { 2295 // get lock 2296 remote_busylock_acquire( lock_xp ); 2297 2298 // scan attached process list to find another process 2299 found = false; 2300 for( iter_xp = hal_remote_l64( root_xp ) ; 2301 (iter_xp != root_xp) && (found == false) ; 2302 iter_xp = hal_remote_l64( iter_xp ) ) 2303 2123 2304 { 2124 2305 current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list ); 2125 current_cxy = GET_CXY( current_xp ); 2126 current_ptr = GET_PTR( current_xp ); 2127 2128 if( process_get_ppid( current_xp ) == 1 ) // current is KSH 2305 2306 if( current_xp != process_xp ) // current is not KSH 2129 2307 { 2130 // release lock2131 remote_busylock_release( lock_xp );2132 2133 2308 // set owner field in TXT chdev 2134 2309 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 2135 2310 2136 2311 #if DEBUG_PROCESS_TXT 2137 cycle = (uint32_t)hal_get_cycles(); 2138 uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) ); 2312 cycle = (uint32_t)hal_get_cycles(); 2313 cxy_t current_cxy = GET_CXY( current_xp ); 2314 process_t * current_ptr = GET_PTR( current_xp ); 2315 uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) ); 2139 2316 if( DEBUG_PROCESS_TXT < cycle ) 2140 printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n", 2141 __FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle ); 2142 process_txt_display( txt_id ); 2143 #endif 2144 return; 2317 printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n", 2318 __FUNCTION__, this->process->pid, this->trdid, txt_id, new_pid, cycle ); 2319 #endif 2320 found = true; 2145 2321 } 2146 2322 } 2147 2323 2148 2324 // release lock 2149 2325 remote_busylock_release( lock_xp );
2150 2326 2151 // PANIC if KSH not found 2152 assert( false , "KSH process not found for TXT %d" ); 2153 2154 return; 2327 // no more owner for TXT if no other process found 2328 if( found == false ) 2329 { 2330 // set owner field in TXT chdev 2331 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL ); 2332 2333 #if DEBUG_PROCESS_TXT 2334 cycle = (uint32_t)hal_get_cycles(); 2335 if( DEBUG_PROCESS_TXT < cycle ) 2336 printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n", 2337 __FUNCTION__, this->process->pid, this->trdid, txt_id, cycle ); 2338 #endif 2339 } 2155 2340 } 2156 else // process is KSH 2157 { 2158 // scan attached process list to find another process 2159 XLIST_FOREACH( root_xp , iter_xp ) 2160 { 2161 current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list ); 2162 current_cxy = GET_CXY( current_xp ); 2163 current_ptr = GET_PTR( current_xp ); 2164 2165 if( current_xp != process_xp ) // current is not KSH 2166 { 2167 // release lock 2168 remote_busylock_release( lock_xp ); 2169 2170 // set owner field in TXT chdev 2171 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp ); 2341 } 2342 else 2343 { 2172 2344 2173 2345 #if DEBUG_PROCESS_TXT 2174 cycle = (uint32_t)hal_get_cycles(); 2175 uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , ¤t_ptr->pid ) ); 2346 cycle = (uint32_t)hal_get_cycles(); 2176 2347 if( DEBUG_PROCESS_TXT < cycle ) 2177 printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n", 2178 __FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle ); 2179 process_txt_display( txt_id ); 2180 #endif 2181 return; 2182 } 2183 } 2184 2185 // release lock 2186 remote_busylock_release( lock_xp ); 2187 2188 // no more owner for TXT if no other process found 2189 hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL ); 2190 2191 #if DEBUG_PROCESS_TXT 2192 cycle = (uint32_t)hal_get_cycles(); 2193 if( DEBUG_PROCESS_TXT < cycle ) 2194 printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n", 2195 __FUNCTION__, this->process->pid, this->trdid, txt_id, cycle ); 2196 process_txt_display( txt_id ); 2197 #endif 2198 return; 2199 } 2200 } 2201 else 2202 { 2203 2204 #if DEBUG_PROCESS_TXT 2205 cycle = (uint32_t)hal_get_cycles(); 2206 if( DEBUG_PROCESS_TXT < cycle ) 2207 printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n", 2208 __FUNCTION__, this->trdid, process_pid, cycle ); 2209 process_txt_display( txt_id ); 2210 #endif 2211 2212 } 2348 printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n", 2349 __FUNCTION__, this->process->pid, this->trdid, process_pid, cycle ); 2350 #endif 2351 2352 } 2353 2213 2354 } // end process_txt_transfer_ownership() 2214 2355 -
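The two ownership-transfer scans now share a bounded-loop shape instead of the former XLIST_FOREACH with an early return inside the critical section. The pattern, extracted directly from the hunk above, works from any cluster because every list access is a remote access:

    // scan the TXT attached-process list until a KSH process is found;
    // the loop exits on (iter_xp == root_xp) or (found == true)
    found = false;
    for( iter_xp = hal_remote_l64( root_xp ) ;
         (iter_xp != root_xp) && (found == false) ;
         iter_xp = hal_remote_l64( iter_xp ) )
    {
        current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );

        if( process_get_ppid( current_xp ) == 1 )   // parent is INIT => KSH
        {
            // give TXT ownership to the KSH process
            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
            found = true;
        }
    }

This also moves the lock release to a single exit point per branch, where the old code released the lock inside the loop body before returning.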
trunk/kernel/kern/process.h
r623 r625 228 228 229 229 /********************************************************************************************* 230 * This function initializes a reference ,user process descriptor from another process230 * This function initializes a reference user process descriptor from another process 231 231 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments 232 232 * are previously allocated by the caller. This function can be called by two functions: 233 * 1) process_init_create() : process is the INIT process ;parent is process-zero.233 * 1) process_init_create() : process is the INIT process, and parent is process-zero. 234 234 * 2) process_make_fork() : the parent process descriptor is generally remote. 235 235 * The following fields are initialised : 236 236 * - It sets the pid / ppid / ref_xp / parent_xp / state fields. 237 * - It initializes the VMM (register the kentry, args, envs vsegs in VSL) 237 * - It creates an empty GPT and an empty VSL. 238 * - It initializes the locks protecting the GPT and the VSL. 239 * - It registers the "kernel" vsegs in VSL, using the hal_vmm_kernel_update() function. 240 * - It registers the "args" and "envs" vsegs in VSL, using the vmm_user_init() function. 241 * - The "code" and "data" must be registered later, using the elf_load_process() function. 238 242 * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR. 239 243 * . if INIT process => link to kernel TXT[0]. 240 * . if KSH[i] process => allocate a free TXT[i] and give TXT ownership.241 * . if USER process => same TXT[i] as parent process and give TXT ownership.244 * . if KSH[i] process => allocate a free TXT[i]. 245 * . if USER process => link to parent process TXT[i]. 242 246 * - It sets the root_xp, bin_xp, cwd_xp fields. 243 247 * - It resets the children list as empty, but does NOT register it in parent children list. … … 251 255 * @ pid : [in] process identifier. 252 256 * @ parent_xp : [in] extended pointer on parent process descriptor. 253 ********************************************************************************************/ 254 void process_reference_init( process_t * process, 255 pid_t pid, 256 xptr_t parent_xp ); 257 * @ return 0 if success / return -1 if failure 258 ********************************************************************************************/ 259 error_t process_reference_init( process_t * process, 260 pid_t pid, 261 xptr_t parent_xp ); 257 262 258 263 /********************************************************************************************* 259 264 * This function initializes a copy process descriptor, in the local cluster, 260 265 * from information defined in the reference remote process descriptor. 266 * As the VSL and the GPT of a process copy are handled as local caches, the GPT copy is 267 * created empty, and the VSL copy contains only the "kernel", "args", and "envs" vsegs. 261 268 ********************************************************************************************* 262 269 * @ process : [in] local pointer on process descriptor to initialize. 263 270 * @ reference_process_xp : [in] extended pointer on reference process descriptor. 264 * @ return 0 if success / return ENOMEMif failure271 * @ return 0 if success / return -1 if failure 265 272 ********************************************************************************************/ 266 273 error_t process_copy_init( process_t * local_process, … … 272 279 * The local th_tbl[] array must be empty.
273 280 * @ process : pointer on the process descriptor.281 * @ process : [in] pointer on the process descriptor. 275 282 ********************************************************************************************/ 276 283 void process_destroy( process_t * process ); … … 283 290 * taken by the caller function. 284 291 ********************************************************************************************* 285 * @ process_xp :extended pointer on process descriptor.292 * @ process_xp : [in] extended pointer on process descriptor. 286 293 ********************************************************************************************/ 287 294 void process_display( xptr_t process_xp ); … … 396 403 /********************************************************************************************* 397 404 * This function implements the "fork" system call, and is called by the sys_fork() function, 398 * likely throu ch the RPC_PROCESS_MAKE_FORK.399 * It allocates memory and initializes a new "child"process descriptor, and the associated400 * "child" thread descriptor in local cluster. It involves up to three different clusters:405 * likely through the RPC_PROCESS_MAKE_FORK. 406 * It allocates memory and initializes a new child process descriptor, and the associated 407 * child thread descriptor in local cluster. It involves up to three different clusters: 401 408 * - the child (local) cluster can be any cluster selected by the sys_fork function. 402 409 * - the parent cluster must be the reference cluster for the parent process. 403 410 * - the client cluster containing the thread requesting the fork can be any cluster. 404 * The new "child" process descriptor is initialised from informations found in the "parent"411 * The new child process descriptor is initialised from information found in the parent 405 412 * reference process descriptor, containing the complete process description. 406 * The new "child" thread descriptor is initialised from informations found in the "parent"413 * The new child thread descriptor is initialised from information found in the parent 407 414 * thread descriptor. 408 415 ********************************************************************************************* … … 504 511 505 512 /********************************************************************************************* 506 513 * This function atomically registers a new thread in the local process descriptor. 507 * It checks that there is an available slot in the local th_tbl[] array, and allocates 508 * a new LTID using the relevant lock depending on the kernel/user type. 509 ********************************************************************************************* 510 * @ process : pointer on the local process descriptor. 511 * @ thread : pointer on new thread to be registered. 513 * This function atomically registers a new thread identified by the <thread> argument 514 * in the th_tbl[] array of the local process descriptor identified by the <process> 515 * argument. It checks that there is an available slot in the local th_tbl[] array, 516 * and allocates a new LTID using the relevant lock depending on the kernel/user type, 517 * and returns the global thread identifier in the <trdid> buffer. 518 ********************************************************************************************* 519 * @ process : [in] pointer on the local process descriptor.
520 * @ thread : [in] pointer on new thread to be registered. 512 521 * @ trdid : [out] buffer for allocated trdid. 513 522 * @ returns 0 if success / returns non zero if no slot available. … … 516 525 struct thread_s * thread, 517 526 trdid_t * trdid ); 527 528 /********************************************************************************************* 529 * This function atomically removes a thread identified by the <thread> argument from 530 * the local process descriptor th_tbl[] array, and returns the number of threads currently 531 * registered in th_tbl[] array before this removal. 532 ********************************************************************************************* 533 * @ thread : pointer on thread to be removed. 534 * @ returns number of threads registered in th_tbl before the removal. 535 ********************************************************************************************/ 536 uint32_t process_remove_thread( struct thread_s * thread ); 518 537 519 538 … … 556 575 557 576 /********************************************************************************************* 558 * This function gives a process identified by the <process_xp> argument the exclusive577 * This function gives a process identified by the <process_xp> argument the 559 578 * ownership of its attached TXT_RX terminal (i.e. puts the process in foreground). 560 * It can be called by a thread running in any cluster, but the <process_xp> must be the561 * owner cluster process descriptor.579 * It can be called by a thread running in any cluster, but the target process descriptor 580 * must be the process owner. 562 581 ********************************************************************************************* 563 582 * @ owner_xp : extended pointer on process descriptor in owner cluster. … … 566 585 567 586 /********************************************************************************************* 568 * When the process identified by the <owner_xp> argument has the exclusive ownership of 569 * the TXT_RX terminal, this function transfer this ownership to another attached process. 570 * The process descriptor must be the process owner. 571 * This function does nothing if the process identified by the <process_xp> is not 572 * the TXT owner. 587 * When the target process identified by the <owner_xp> argument has the exclusive ownership 588 * of the TXT_RX terminal, this function transfers this ownership to another process attached 589 * to the same terminal. The target process descriptor must be the process owner. 590 * This function does nothing if the target process is not the TXT owner. 573 591 * - If the current owner is not the KSH process, the new owner is the KSH process. 574 592 * - If the current owner is the KSH process, the new owner is another attached process. -
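Returning the pre-removal thread count from process_remove_thread(), instead of a boolean, moves the last-thread decision to the caller while keeping the count available for tracing. The expected caller-side pattern, consistent with the scheduler change in this changeset, is:

    // remove the thread from th_tbl[]; the returned value is the number
    // of threads that were registered *before* this removal
    uint32_t count = process_remove_thread( thread );

    if( count == 1 )            // this was the last registered thread
    {
        process_destroy( process );
    }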
trunk/kernel/kern/rpc.c
r624 r625 24 24 #include <kernel_config.h> 25 25 #include <hal_kernel_types.h> 26 #include <hal_vmm.h> 26 27 #include <hal_atomic.h> 27 28 #include <hal_remote.h> … … 52 53 &rpc_pmem_get_pages_server, // 0 53 54 &rpc_pmem_release_pages_server, // 1 54 &rpc_ undefined, // 2 unused slot55 &rpc_ppm_display_server, // 2 55 56 &rpc_process_make_fork_server, // 3 56 57 &rpc_user_dir_create_server, // 4 … … 81 82 &rpc_vmm_create_vseg_server, // 27 82 83 &rpc_vmm_set_cow_server, // 28 83 &rpc_hal_vmm_display_server, 84 &rpc_hal_vmm_display_server, // 29 84 85 }; 85 86 … … 88 89 "PMEM_GET_PAGES", // 0 89 90 "PMEM_RELEASE_PAGES", // 1 90 " undefined",// 291 "PPM_DISPLAY", // 2 91 92 "PROCESS_MAKE_FORK", // 3 92 93 "USER_DIR_CREATE", // 4 … … 566 567 567 568 ///////////////////////////////////////////////////////////////////////////////////////// 568 // [2] undefined slot 569 ///////////////////////////////////////////////////////////////////////////////////////// 569 // [2] Marshaling functions attached to RPC_PPM_DISPLAY 570 ///////////////////////////////////////////////////////////////////////////////////////// 571 572 ///////////////////////////////////////// 573 void rpc_ppm_display_client( cxy_t cxy ) 574 { 575 #if DEBUG_RPC_PPM_DISPLAY 576 thread_t * this = CURRENT_THREAD; 577 uint32_t cycle = (uint32_t)hal_get_cycles(); 578 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 579 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 580 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 581 #endif 582 583 uint32_t responses = 1; 584 585 // initialise RPC descriptor header 586 rpc_desc_t rpc; 587 rpc.index = RPC_PPM_DISPLAY; 588 rpc.blocking = true; 589 rpc.rsp = &responses; 590 591 // register RPC request in remote RPC fifo 592 rpc_send( cxy , &rpc ); 593 594 #if DEBUG_RPC_PPM_DISPLAY 595 cycle = (uint32_t)hal_get_cycles(); 596 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 597 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 598 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 599 #endif 600 } 601 602 //////////////////////////////////////////////////////////////////// 603 void rpc_ppm_display_server( xptr_t __attribute__((__unused__)) xp ) 604 { 605 #if DEBUG_RPC_PPM_DISPLAY 606 thread_t * this = CURRENT_THREAD; 607 uint32_t cycle = (uint32_t)hal_get_cycles(); 608 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 609 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 610 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 611 #endif 612 613 // call local kernel function 614 ppm_display(); 615 616 #if DEBUG_RPC_PPM_DISPLAY 617 cycle = (uint32_t)hal_get_cycles(); 618 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 619 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 620 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 621 #endif 622 } 570 623 571 624 ///////////////////////////////////////////////////////////////////////////////////////// -
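The new slot 2 RPC carries no arguments and no results, so usage reduces to choosing the target cluster. A minimal sketch, using only the two functions visible in this hunk (a request aimed at the local cluster can skip the RPC entirely):

    // display the physical memory allocator state of cluster cxy
    if( cxy == local_cxy )  ppm_display();                 // direct local call
    else                    rpc_ppm_display_client( cxy ); // blocking RPC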
trunk/kernel/kern/rpc.h
r624 r625 62 62 RPC_PMEM_GET_PAGES = 0, 63 63 RPC_PMEM_RELEASE_PAGES = 1, 64 RPC_ UNDEFINED_2= 2,64 RPC_PPM_DISPLAY = 2, 65 65 RPC_PROCESS_MAKE_FORK = 3, 66 66 RPC_USER_DIR_CREATE = 4, … … 200 200 201 201 /*********************************************************************************** 202 * [2] undefined slot 203 **********************************************************************************/ 202 * [2] The RPC_PPM_DISPLAY allows any client thread to request any remote cluster, 203 * identified by the <cxy> argument, to display the physical memory allocator state. 204 **********************************************************************************/ 205 void rpc_ppm_display_client( cxy_t cxy ); 206 207 void rpc_ppm_display_server( xptr_t xp ); 204 208 205 209 /*********************************************************************************** -
trunk/kernel/kern/scheduler.c
r624 r625 180 180 sched = &core->scheduler; 181 181 182 /////////////// scan user threads to handle both ACK and DELETE requests182 ////////////////// scan user threads to handle both ACK and DELETE requests 183 183 root = &sched->u_root; 184 184 iter = root->next; … … 240 240 busylock_release( &sched->lock ); 241 241 242 // check th_nr value 243 assert( (process->th_nr > 0) , "process th_nr cannot be 0\n" ); 244 245 // remove thread from process th_tbl[] 246 process->th_tbl[ltid] = NULL; 247 count = hal_atomic_add( &process->th_nr , - 1 ); 248 249 // release memory allocated for thread descriptor 250 thread_destroy( thread ); 242 // release memory allocated for thread 243 count = thread_destroy( thread ); 251 244 252 245 hal_fence(); … … 255 248 uint32_t cycle = (uint32_t)hal_get_cycles(); 256 249 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 257 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",258 __FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid, cycle );250 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / %d threads / cycle %d\n", 251 __FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, count, cycle ); 259 252 #endif 260 253 // destroy process descriptor if last thread … … 274 267 } // end user threads 275 268 276 ////// scan kernel threads for DELETE only269 ///////////// scan kernel threads for DELETE only 277 270 root = &sched->k_root; 278 271 iter = root->next; … … 290 283 291 284 // check process descriptor is local kernel process 292 assert( ( thread->process == &process_zero ) , "illegal process descriptor \n");285 assert( ( thread->process == &process_zero ) , "illegal process descriptor"); 293 286 294 287 // get thread ltid … … 325 318 326 319 // check th_nr value 327 assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0 \n" );320 assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" ); 328 321 329 322 // remove thread from process th_tbl[] … … 477 470 } // end sched_register_thread() 478 471 479 ////////////////////////////////////// 480 void sched_yield( const char * cause )472 ////////////////////////////////////////////////////////////////// 473 void sched_yield( const char * cause __attribute__((__unused__)) ) 481 474 { 482 475 thread_t * next; … … 512 505 // check next thread kernel_stack overflow 513 506 assert( (next->signature == THREAD_SIGNATURE), 514 "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid );507 "kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid ); 515 508 516 509 // check next thread attached to same core as the calling thread 517 510 assert( (next->core == current->core), 518 "next core %x != current core %x \n", next->core, current->core );511 "next core %x != current core %x", next->core, current->core ); 519 512 520 513 // check next thread not blocked when type != IDLE 521 514 assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , 522 "next thread %x (%s) is blocked on core[%x,%d] \n",515 "next thread %x (%s) is blocked on core[%x,%d]", 523 516 next->trdid , thread_type_str(next->type) , local_cxy , lid ); 524 517 … … 561 554 #if (DEBUG_SCHED_YIELD & 1) 562 555 // if( sched->trace ) 563 if( uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )556 if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD ) 564 557 printk("\n[%s] core[%x,%d] / cause = %s\n" 565 558 " thread %x (%s) (%x,%x) continue / cycle %d\n", … … 584 577 list_entry_t * iter; 585 578 thread_t * thread; 586 587 // check lid588 assert( (lid < 
LOCAL_CLUSTER->cores_nr),589 "illegal core index %d\n", lid);590 579 591 580 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; … … 644 633 { 645 634 thread_t * thread; 646 647 // check cxy648 assert( (cluster_is_undefined( cxy ) == false),649 "illegal cluster %x\n", cxy );650 651 assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),652 "illegal core index %d\n", lid );653 635 654 636 // get local pointer on target scheduler -
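Both scan loops in sched_handle_signals() rely on advancing the iterator before the current entry can be unlinked, which is what makes deletion during traversal safe. The pattern in isolation (a sketch only: the THREAD_FLAG_REQ_DELETE flag and list_unlink() helper names are taken from the surrounding kernel APIs, not from this hunk):

    iter = root->next;
    while( iter != root )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        iter = iter->next;          // advance first: the entry may be unlinked

        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            list_unlink( &thread->sched_list );
            // ... then release the thread resources ...
        }
    }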
trunk/kernel/kern/thread.c
r624 r625 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 29 29 #include <hal_special.h> 30 30 #include <hal_remote.h> 31 #include <hal_vmm.h> 31 32 #include <memcpy.h> 32 33 #include <printk.h> … … 96 97 97 98 ///////////////////////////////////////////////////////////////////////////////////// 98 // This static function releases the physical memory for a thread descriptor.99 // It is called by the three functions:100 // - thread_user_create()101 // - thread_user_fork()102 // - thread_kernel_create()103 /////////////////////////////////////////////////////////////////////////////////////104 // @ thread : pointer on thread descriptor.105 /////////////////////////////////////////////////////////////////////////////////////106 static void thread_release( thread_t * thread )107 {108 kmem_req_t req;109 110 xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );111 112 req.type = KMEM_PAGE;113 req.ptr = GET_PTR( base_xp );114 kmem_free( &req );115 }116 117 /////////////////////////////////////////////////////////////////////////////////////118 99 // This static function initializes a thread descriptor (kernel or user). 119 100 // It can be called by the four functions: … … 122 103 // - thread_kernel_create() 123 104 // - thread_idle_init() 105 // The "type" and "trdid" fields must have been previously set. 124 106 // It updates the local DQDT. 125 107 ///////////////////////////////////////////////////////////////////////////////////// 126 // @ thread : pointer on local thread descriptor127 // @ process : pointer on local process descriptor.128 // @ type : thread type.129 // @ func : pointer on thread entry function.130 // @ args : pointer on thread entry function arguments.131 // @ core_lid : target core local index.132 // @ u_stack_base : stack base (user thread only)133 // @ u _stack_size : stack base(user thread only)108 // @ thread : pointer on local thread descriptor 109 // @ process : pointer on local process descriptor. 110 // @ type : thread type. 111 // @ trdid : thread identifier 112 // @ func : pointer on thread entry function. 113 // @ args : pointer on thread entry function arguments. 114 // @ core_lid : target core local index. 
115 // @ user_stack_vseg : local pointer on user stack vseg (user thread only) 134 116 ///////////////////////////////////////////////////////////////////////////////////// 135 117 static error_t thread_init( thread_t * thread, 136 118 process_t * process, 137 119 thread_type_t type, 120 trdid_t trdid, 138 121 void * func, 139 122 void * args, 140 123 lid_t core_lid, 141 intptr_t u_stack_base, 142 uint32_t u_stack_size ) 143 { 144 error_t error; 145 trdid_t trdid; // allocated thread identifier 146 147 cluster_t * local_cluster = LOCAL_CLUSTER; 124 vseg_t * user_stack_vseg ) 125 { 126 127 // check type and trdid fields initialized 128 assert( (thread->type == type) , "bad type argument" ); 129 assert( (thread->trdid == trdid) , "bad trdid argument" ); 148 130 149 131 #if DEBUG_THREAD_INIT … … 152 134 if( DEBUG_THREAD_INIT < cycle ) 153 135 printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n", 154 __FUNCTION__, this->process->pid, this->trdid, thread , process->pid , cycle );136 __FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid , cycle ); 155 137 #endif 156 138 … … 159 141 160 142 // Initialize new thread descriptor 161 thread->type = type;162 143 thread->quantum = 0; // TODO 163 144 thread->ticks_nr = 0; // TODO 164 145 thread->time_last_check = 0; // TODO 165 thread->core = & local_cluster->core_tbl[core_lid];146 thread->core = &LOCAL_CLUSTER->core_tbl[core_lid]; 166 147 thread->process = process; 167 168 148 thread->busylocks = 0; 169 149 … … 172 152 #endif 173 153 174 thread->u_stack_base = u_stack_base; 175 thread->u_stack_size = u_stack_size; 154 thread->user_stack_vseg = user_stack_vseg; 176 155 thread->k_stack_base = (intptr_t)thread + desc_size; 177 156 thread->k_stack_size = CONFIG_THREAD_DESC_SIZE - desc_size; 178 179 157 thread->entry_func = func; // thread entry point 180 158 thread->entry_args = args; // thread function arguments … … 185 163 thread->blocked = THREAD_BLOCKED_GLOBAL; 186 164 187 // register new thread in process descriptor, and get a TRDID188 error = process_register_thread( process, thread , &trdid );189 190 if( error )191 {192 printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"193 " for thread %s in process %x / cycle %d\n",194 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,195 local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );196 return EINVAL;197 }198 199 // initialize trdid200 thread->trdid = trdid;201 202 165 // initialize sched list 203 166 list_entry_init( &thread->sched_list ); … … 237 200 } // end thread_init() 238 201 239 ////////////////////////////////////////////////// ///////202 ////////////////////////////////////////////////// 240 203 error_t thread_user_create( pid_t pid, 241 204 void * start_func, … … 246 209 error_t error; 247 210 thread_t * thread; // pointer on created thread descriptor 211 trdid_t trdid; // created thred identifier 248 212 process_t * process; // pointer to local process descriptor 249 213 lid_t core_lid; // selected core local index 250 vseg_t * vseg; //stack vseg214 vseg_t * us_vseg; // user stack vseg 251 215 252 216 assert( (attr != NULL) , "pthread attributes must be defined" ); … … 266 230 { 267 231 printk("\n[ERROR] in %s : cannot get process descriptor %x\n", 268 269 return ENOMEM;232 __FUNCTION__ , pid ); 233 return -1; 270 234 } 271 235 … … 284 248 printk("\n[ERROR] in %s : illegal core index attribute = %d\n", 285 249 __FUNCTION__ , core_lid ); 286 return EINVAL;250 return -1; 287 251 } 
288 252 } … … 298 262 #endif 299 263 300 // allocate a stack from local VMM301 vseg = vmm_create_vseg( process,302 VSEG_TYPE_STACK,303 0, // size unused304 0, // length unused305 0, // file_offset unused306 0, // file_size unused307 XPTR_NULL, // mapper_xp unused308 local_cxy );309 310 if( vseg == NULL )311 {312 printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );313 return ENOMEM;314 }315 316 #if( DEBUG_THREAD_USER_CREATE & 1)317 if( DEBUG_THREAD_USER_CREATE < cycle )318 printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",319 __FUNCTION__, vseg->vpn_base, vseg->vpn_size );320 #endif321 322 264 // allocate memory for thread descriptor 323 265 thread = thread_alloc(); … … 325 267 if( thread == NULL ) 326 268 { 327 printk("\n[ERROR] in %s : cannot create new thread \n", __FUNCTION__ );328 vmm_delete_vseg( process->pid , vseg->min);329 return ENOMEM;269 printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n", 270 __FUNCTION__, local_cxy ); 271 return -1; 330 272 } 331 273 … … 336 278 #endif 337 279 280 // set type in thread descriptor 281 thread->type = THREAD_USER; 282 283 // register new thread in process descriptor, and get a TRDID 284 error = process_register_thread( process, thread , &trdid ); 285 286 if( error ) 287 { 288 printk("\n[ERROR] in %s : cannot register new thread in process %x\n", 289 __FUNCTION__, pid ); 290 thread_destroy( thread ); 291 return -1; 292 } 293 294 // set trdid in thread descriptor 295 thread->trdid = trdid; 296 297 #if( DEBUG_THREAD_USER_CREATE & 1) 298 if( DEBUG_THREAD_USER_CREATE < cycle ) 299 printk("\n[%s] new thread %x registered in process %x\n", 300 __FUNCTION__, trdid, pid ); 301 #endif 302 303 // allocate a stack from local VMM 304 us_vseg = vmm_create_vseg( process, 305 VSEG_TYPE_STACK, 306 LTID_FROM_TRDID( trdid ), 307 0, // size unused 308 0, // file_offset unused 309 0, // file_size unused 310 XPTR_NULL, // mapper_xp unused 311 local_cxy ); 312 313 if( us_vseg == NULL ) 314 { 315 printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ ); 316 process_remove_thread( thread ); 317 thread_destroy( thread ); 318 return -1; 319 } 320 321 #if( DEBUG_THREAD_USER_CREATE & 1) 322 if( DEBUG_THREAD_USER_CREATE < cycle ) 323 printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n", 324 __FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size ); 325 #endif 326 338 327 // initialize thread descriptor 339 328 error = thread_init( thread, 340 329 process, 341 330 THREAD_USER, 331 trdid, 342 332 start_func, 343 333 start_arg, 344 334 core_lid, 345 vseg->min, 346 vseg->max - vseg->min ); 335 us_vseg ); 347 336 if( error ) 348 337 { 349 338 printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ ); 350 vmm_delete_vseg( process->pid , vseg->min ); 351 thread_release( thread ); 352 return EINVAL; 339 vmm_remove_vseg( process , us_vseg ); 340 process_remove_thread( thread ); 341 thread_destroy( thread ); 342 return -1; 353 343 } 354 344 355 345 #if( DEBUG_THREAD_USER_CREATE & 1) 356 346 if( DEBUG_THREAD_USER_CREATE < cycle ) 357 printk("\n[%s] new thread descriptor initialised / trdid %x\n",358 __FUNCTION__, thread->trdid );347 printk("\n[%s] new thread %x in process %x initialised\n", 348 __FUNCTION__, thread->trdid, process->pid ); 359 349 #endif 360 350 … … 369 359 { 370 360 printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ ); 371 vmm_delete_vseg( process->pid , vseg->min ); 372 thread_release( thread ); 373 return ENOMEM; 361 vmm_remove_vseg( process , us_vseg ); 362 
process_remove_thread( thread ); 363 thread_destroy( thread ); 364 return -1; 374 365 } 375 366 hal_cpu_context_init( thread ); … … 379 370 { 380 371 printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ ); 381 vmm_delete_vseg( process->pid , vseg->min ); 382 thread_release( thread ); 383 return ENOMEM; 372 vmm_remove_vseg( process , us_vseg ); 373 process_remove_thread( thread ); 374 thread_destroy( thread ); 375 return -1; 384 376 } 385 377 hal_fpu_context_init( thread ); … … 410 402 { 411 403 error_t error; 412 thread_t * child_ptr; // local pointer on local child thread 404 thread_t * child_ptr; // local pointer on child thread 405 trdid_t child_trdid; // child thread identifier 413 406 lid_t core_lid; // selected core local index 414 415 407 thread_t * parent_ptr; // local pointer on remote parent thread 416 408 cxy_t parent_cxy; // parent thread cluster 417 409 process_t * parent_process; // local pointer on parent process 418 410 xptr_t parent_gpt_xp; // extended pointer on parent thread GPT 419 420 void * func; // parent thread entry_func 421 void * args; // parent thread entry_args 422 intptr_t base; // parent thread u_stack_base 423 uint32_t size; // parent thread u_stack_size 424 uint32_t flags; // parent_thread flags 425 vpn_t vpn_base; // parent thread stack vpn_base 426 vpn_t vpn_size; // parent thread stack vpn_size 427 reg_t * uzone; // parent thread pointer on uzone 428 429 vseg_t * vseg; // child thread STACK vseg 411 void * parent_func; // parent thread entry_func 412 void * parent_args; // parent thread entry_args 413 uint32_t parent_flags; // parent_thread flags 414 vseg_t * parent_us_vseg; // parent thread user stack vseg 415 vseg_t * child_us_vseg; // child thread user stack vseg 430 416 431 417 #if DEBUG_THREAD_USER_FORK … … 433 419 thread_t * this = CURRENT_THREAD; 434 420 if( DEBUG_THREAD_USER_FORK < cycle ) 435 printk("\n[%s] thread[%x,%x] enter /child_process %x / cycle %d\n",421 printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n", 436 422 __FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle ); 437 423 #endif … … 439 425 // select a target core in local cluster 440 426 core_lid = cluster_select_local_core(); 427 428 #if (DEBUG_THREAD_USER_FORK & 1) 429 if( DEBUG_THREAD_USER_FORK < cycle ) 430 printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n", 431 __FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid ); 432 #endif 441 433 442 434 // get cluster and local pointer on parent thread descriptor … … 444 436 parent_ptr = GET_PTR( parent_thread_xp ); 445 437 446 // get relevant fields from parent thread 447 func = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func )); 448 args = (void *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args )); 449 base = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base )); 450 size = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size )); 451 flags = hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags )); 452 uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current )); 453 454 vpn_base = base >> CONFIG_PPM_PAGE_SHIFT; 455 vpn_size = size >> CONFIG_PPM_PAGE_SHIFT; 438 // get relevant infos from parent thread 439 parent_func = (void *) hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func )); 440 parent_args = (void *) hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args )); 441 parent_flags = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags )); 442 parent_us_vseg 
= (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg )); 456 443 457 444 // get pointer on parent process in parent thread cluster … … 459 446 &parent_ptr->process ) ); 460 447 461 // getextended pointer on parent GPT in parent thread cluster448 // build extended pointer on parent GPT in parent thread cluster 462 449 parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt ); 450 451 #if (DEBUG_THREAD_USER_FORK & 1) 452 if( DEBUG_THREAD_USER_FORK < cycle ) 453 printk("\n[%s] thread[%x,%x] get parent GPT\n", 454 __FUNCTION__, this->process->pid, this->trdid ); 455 #endif 463 456 464 457 // allocate memory for child thread descriptor 465 458 child_ptr = thread_alloc(); 459 466 460 if( child_ptr == NULL ) 467 461 { 468 printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ ); 462 printk("\n[ERROR] in %s : cannot allocate new thread\n", 463 __FUNCTION__ ); 469 464 return -1; 470 465 } 466 467 #if (DEBUG_THREAD_USER_FORK & 1) 468 if( DEBUG_THREAD_USER_FORK < cycle ) 469 printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n", 470 __FUNCTION__, this->process->pid, this->trdid, child_ptr ); 471 #endif 472 473 // set type in thread descriptor 474 child_ptr->type = THREAD_USER; 475 476 // register new thread in process descriptor, and get a TRDID 477 error = process_register_thread( child_process, child_ptr , &child_trdid ); 478 479 if( error ) 480 { 481 printk("\n[ERROR] in %s : cannot register new thread in process %x\n", 482 __FUNCTION__, child_process->pid ); 483 thread_destroy( child_ptr ); 484 return -1; 485 } 486 487 // set trdid in thread descriptor 488 child_ptr->trdid = child_trdid; 489 490 #if (DEBUG_THREAD_USER_FORK & 1) 491 if( DEBUG_THREAD_USER_FORK < cycle ) 492 printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n", 493 __FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid ); 494 #endif 495 496 // get an user stack vseg from local VMM allocator 497 child_us_vseg = vmm_create_vseg( child_process, 498 VSEG_TYPE_STACK, 499 LTID_FROM_TRDID( child_trdid ), 500 0, // size unused 501 0, // file_offset unused 502 0, // file_size unused 503 XPTR_NULL, // mapper_xp unused 504 local_cxy ); 505 if( child_us_vseg == NULL ) 506 { 507 printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ ); 508 process_remove_thread( child_ptr ); 509 thread_destroy( child_ptr ); 510 return -1; 511 } 512 513 #if (DEBUG_THREAD_USER_FORK & 1) 514 if( DEBUG_THREAD_USER_FORK < cycle ) 515 printk("\n[%s] thread[%x,%x] created an user stack vseg / vpn_base %x / %d pages\n", 516 __FUNCTION__, this->process->pid, this->trdid, 517 child_us_vseg->vpn_base, child_us_vseg->vpn_size ); 518 #endif 471 519 472 520 // initialize thread descriptor … … 474 522 child_process, 475 523 THREAD_USER, 476 func, 477 args, 524 child_trdid, 525 parent_func, 526 parent_args, 478 527 core_lid, 479 base, 480 size ); 528 child_us_vseg ); 481 529 if( error ) 482 530 { 483 531 printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ ); 484 thread_release( child_ptr ); 485 return EINVAL; 532 vmm_remove_vseg( child_process , child_us_vseg ); 533 process_remove_thread( child_ptr ); 534 thread_destroy( child_ptr ); 535 return -1; 486 536 } 487 537 … … 492 542 #endif 493 543 494 // return child pointer495 *child_thread = child_ptr;496 497 544 // set detached flag if required 498 if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED; 499 500 // update uzone pointer in child thread descriptor 501 
child_ptr->uzone_current = (char *)((intptr_t)uzone + 502 (intptr_t)child_ptr - 503 (intptr_t)parent_ptr ); 504 505 506 // allocate CPU context for child thread 545 if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED; 546 547 // allocate a CPU context for child thread 507 548 if( hal_cpu_context_alloc( child_ptr ) ) 508 549 { 509 550 printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ ); 510 thread_release( child_ptr ); 551 vmm_remove_vseg( child_process , child_us_vseg ); 552 process_remove_thread( child_ptr ); 553 thread_destroy( child_ptr ); 511 554 return -1; 512 555 } 513 556 514 // allocate FPU context for child thread557 // allocate a FPU context for child thread 515 558 if( hal_fpu_context_alloc( child_ptr ) ) 516 559 { 517 560 printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ ); 518 thread_release( child_ptr ); 561 vmm_remove_vseg( child_process , child_us_vseg ); 562 process_remove_thread( child_ptr ); 563 thread_destroy( child_ptr ); 519 564 return -1; 520 565 } … … 526 571 #endif 527 572 528 // create and initialize STACK vseg 529 vseg = vseg_alloc(); 530 vseg_init( vseg, 531 VSEG_TYPE_STACK, 532 base, 533 size, 534 vpn_base, 535 vpn_size, 536 0, 0, XPTR_NULL, // not a file vseg 537 local_cxy ); 538 539 // register STACK vseg in local child VSL 540 vmm_attach_vseg_to_vsl( &child_process->vmm , vseg ); 541 542 #if (DEBUG_THREAD_USER_FORK & 1) 543 if( DEBUG_THREAD_USER_FORK < cycle ) 544 printk("\n[%s] thread[%x,%x] created stack vseg for thread %x in process %x\n", 545 __FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid ); 546 #endif 547 548 // copy all valid STACK GPT entries 549 vpn_t vpn; 550 bool_t mapped; 551 ppn_t ppn; 552 for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) 573 // scan parent GPT, and copy all valid entries 574 // associated to user stack vseg into child GPT 575 vpn_t parent_vpn; 576 vpn_t child_vpn; 577 bool_t mapped; 578 ppn_t ppn; 579 vpn_t parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) ); 580 vpn_t parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) ); 581 vpn_t child_vpn_base = child_us_vseg->vpn_base; 582 for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ; 583 parent_vpn < (parent_vpn_base + parent_vpn_size) ; 584 parent_vpn++ , child_vpn++ ) 553 585 { 554 586 error = hal_gpt_pte_copy( &child_process->vmm.gpt, 587 child_vpn, 555 588 parent_gpt_xp, 556 vpn,589 parent_vpn, 557 590 true, // set cow 558 591 &ppn, … … 560 593 if( error ) 561 594 { 562 vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );563 thread_release( child_ptr );564 595 printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ ); 596 vmm_remove_vseg( child_process , child_us_vseg ); 597 process_remove_thread( child_ptr ); 598 thread_destroy( child_ptr ); 565 599 return -1; 566 600 } 567 601 568 // increment pending forks counter for the page if mapped602 // increment pending forks counter for a mapped page 569 603 if( mapped ) 570 604 { … … 574 608 page_t * page_ptr = GET_PTR( page_xp ); 575 609 576 // getextended pointers on forks and lock fields610 // build extended pointers on forks and lock fields 577 611 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 578 612 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); … … 586 620 // release lock protecting page 587 621 remote_busylock_release( lock_xp ); 622 } 623 } 588 624 589 625 #if (DEBUG_THREAD_USER_FORK & 1) 590 cycle = 
(uint32_t)hal_get_cycles();591 626 if( DEBUG_THREAD_USER_FORK < cycle ) 592 printk("\n[%s] thread[%x,%x] copied one PTE to child GPT : vpn %x / forks %d\n", 593 __FUNCTION__, this->process->pid, this->trdid, 594 vpn, hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) ); 595 #endif 596 597 } 598 } 599 600 // set COW flag for all mapped entries of STAK vseg in parent thread GPT 627 printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs to child GPT\n", 628 __FUNCTION__, this->process->pid, this->trdid ); 629 #endif 630 631 // set COW flag for all mapped entries of user stack vseg in parent GPT 601 632 hal_gpt_set_cow( parent_gpt_xp, 602 vpn_base, 603 vpn_size ); 604 633 parent_vpn_base, 634 parent_vpn_size ); 635 636 #if (DEBUG_THREAD_USER_FORK & 1) 637 if( DEBUG_THREAD_USER_FORK < cycle ) 638 printk("\n[%s] thread[%x,%x] set the COW flag for stack vseg in parent GPT\n", 639 __FUNCTION__, this->process->pid, this->trdid ); 640 #endif 641 642 // return child pointer 643 *child_thread = child_ptr; 644 605 645 #if DEBUG_THREAD_USER_FORK 606 646 cycle = (uint32_t)hal_get_cycles(); 607 647 if( DEBUG_THREAD_USER_FORK < cycle ) 608 printk("\n[%s] thread[%x,%x] exit / child_thread %x / cycle %d\n", 609 __FUNCTION__, this->process->pid, this->trdid, child_ptr, cycle ); 648 printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n", 649 __FUNCTION__, this->process->pid, this->trdid, 650 child_ptr->process->pid, child_ptr->trdid, cycle ); 610 651 #endif 611 652 … … 660 701 661 702 // allocate an user stack vseg for main thread 662 vseg_t * vseg = vmm_create_vseg( process,663 VSEG_TYPE_STACK,664 0, // size unused665 0, // length unused666 0, // file_offset unused667 0, // file_size unused668 XPTR_NULL, // mapper_xp unused669 local_cxy );670 if( vseg == NULL )703 vseg_t * us_vseg = vmm_create_vseg( process, 704 VSEG_TYPE_STACK, 705 LTID_FROM_TRDID( thread->trdid ), 706 0, // length unused 707 0, // file_offset unused 708 0, // file_size unused 709 XPTR_NULL, // mapper_xp unused 710 local_cxy ); 711 if( us_vseg == NULL ) 671 712 { 672 713 printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ ); … … 675 716 676 717 // update user stack in thread descriptor 677 thread->u_stack_base = vseg->min; 678 thread->u_stack_size = vseg->max - vseg->min; 718 thread->user_stack_vseg = us_vseg; 679 719 680 720 // release FPU ownership if required … … 710 750 error_t error; 711 751 thread_t * thread; // pointer on new thread descriptor 752 trdid_t trdid; // new thread identifier 712 753 713 754 thread_t * this = CURRENT_THREAD; … … 737 778 } 738 779 780 // set type in thread descriptor 781 thread->type = type; 782 783 // register new thread in local kernel process descriptor, and get a TRDID 784 error = process_register_thread( &process_zero , thread , &trdid ); 785 786 if( error ) 787 { 788 printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ ); 789 return -1; 790 } 791 792 // set trdid in thread descriptor 793 thread->trdid = trdid; 794 739 795 // initialize thread descriptor 740 796 error = thread_init( thread, 741 797 &process_zero, 742 798 type, 799 trdid, 743 800 func, 744 801 args, 745 802 core_lid, 746 0 , 0); // no user stack for a kernel thread803 NULL ); // no user stack for a kernel thread 747 804 748 805 if( error ) // release allocated memory for thread descriptor 749 806 { 750 printk("\n[ERROR] in %s : thread %x in process %x\n" 751 " cannot initialize thread descriptor\n", 752 __FUNCTION__, this->trdid, this->process->pid ); 753 
thread_release( thread ); 807 printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ ); 808 thread_destroy( thread ); 754 809 return ENOMEM; 755 810 } … … 763 818 " cannot create CPU context\n", 764 819 __FUNCTION__, this->trdid, this->process->pid ); 765 thread_ release( thread );820 thread_destroy( thread ); 766 821 return EINVAL; 767 822 } … … 791 846 lid_t core_lid ) 792 847 { 848 trdid_t trdid; 849 error_t error; 793 850 794 851 // check arguments … … 796 853 assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" ); 797 854 855 // set type in thread descriptor 856 thread->type = THREAD_IDLE; 857 858 // register idle thread in local kernel process descriptor, and get a TRDID 859 error = process_register_thread( &process_zero , thread , &trdid ); 860 861 assert( (error == 0), "cannot register idle_thread in kernel process" ); 862 863 // set trdid in thread descriptor 864 thread->trdid = trdid; 865 798 866 // initialize thread descriptor 799 error_t error = thread_init( thread, 800 &process_zero, 801 type, 802 func, 803 args, 804 core_lid, 805 0 , 0 ); // no user stack for a kernel thread 806 807 assert( (error == 0), "cannot create thread idle" ); 867 error = thread_init( thread, 868 &process_zero, 869 THREAD_IDLE, 870 trdid, 871 func, 872 args, 873 core_lid, 874 NULL ); // no user stack for a kernel thread 875 876 assert( (error == 0), "cannot initialize idle_thread" ); 808 877 809 878 // allocate & initialize CPU context if success 810 879 error = hal_cpu_context_alloc( thread ); 811 880 812 881 assert( (error == 0), "cannot allocate CPU context" ); 813 882 814 883 hal_cpu_context_init( thread ); … … 816 885 } // end thread_idle_init() 817 886 818 /////////////////////////////////////////////////////////////////////////////////////// 819 // TODO: check that all memory dynamically allocated during thread execution 820 // has been released => check vmm destroy for MMAP vsegs [AG] 821 /////////////////////////////////////////////////////////////////////////////////////// 822 void thread_destroy( thread_t * thread ) 823 { 824 reg_t save_sr; 825 826 process_t * process = thread->process; 827 core_t * core = thread->core; 887 //////////////////////////////////////////// 888 uint32_t thread_destroy( thread_t * thread ) 889 { 890 reg_t save_sr; 891 uint32_t count; 892 893 thread_type_t type = thread->type; 894 process_t * process = thread->process; 895 core_t * core = thread->core; 828 896 829 897 #if DEBUG_THREAD_DESTROY … … 835 903 #endif 836 904 837 // check busylocks counter905 // check calling thread busylocks counter 838 906 thread_assert_can_yield( thread , __FUNCTION__ ); 839 907 840 // update intrumentation values908 // update target process instrumentation counter 841 909 process->vmm.pgfault_nr += thread->info.pgfault_nr; 842 910 843 // release memory allocated for CPU context and FPU context 911 // remove thread from process th_tbl[] 912 count = process_remove_thread( thread ); 913 914 // release memory allocated for CPU context and FPU context if required 844 915 hal_cpu_context_destroy( thread ); 845 if ( thread->type == THREAD_USER )hal_fpu_context_destroy( thread );916 hal_fpu_context_destroy( thread ); 846 917 918 // release user stack vseg (for an user thread only) 919 if( type == THREAD_USER ) vmm_remove_vseg( process , thread->user_stack_vseg ); 920 847 921 // release FPU ownership if required 848 922 hal_disable_irq( &save_sr ); … … 857 931 thread->signature = 0; 858 932 859 // release memory for thread descriptor 860 thread_release( 
thread ); 933 // release memory for thread descriptor (including kernel stack) 934 kmem_req_t req; 935 xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) ); 936 937 req.type = KMEM_PAGE; 938 req.ptr = GET_PTR( base_xp ); 939 kmem_free( &req ); 861 940 862 941 #if DEBUG_THREAD_DESTROY … … 866 945 __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle ); 867 946 #endif 947 948 return count; 868 949 869 950 } // end thread_destroy() … … 993 1074 cxy_t target_cxy; // target thread cluster 994 1075 thread_t * target_ptr; // pointer on target thread 1076 process_t * target_process; // pointer on arget process 1077 pid_t target_pid; // target process identifier 995 1078 xptr_t target_flags_xp; // extended pointer on target thread <flags> 996 1079 xptr_t target_join_lock_xp; // extended pointer on target thread <join_lock> … … 1006 1089 target_ptr = GET_PTR( target_xp ); 1007 1090 1008 // get target thread identifier s, and attached flag1091 // get target thread identifier, attached flag, and process PID 1009 1092 target_trdid = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) ); 1010 1093 target_ltid = LTID_FROM_TRDID( target_trdid ); 1011 1094 target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 1012 1095 target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 ); 1096 target_process = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) ); 1097 target_pid = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) ); 1098 1099 // check target PID 1100 assert( (pid == target_pid), 1101 "unconsistent pid and target_xp arguments" ); 1013 1102 1014 1103 // get killer thread pointers … … 1027 1116 // must be deleted by the parent process sys_wait() function 1028 1117 assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)), 1029 "t harget thread cannot be the main thread\n" );1118 "target thread cannot be the main thread" ); 1030 1119 1031 1120 // check killer thread can yield … … 1151 1240 void thread_idle_func( void ) 1152 1241 { 1242 1243 #if DEBUG_THREAD_IDLE 1244 uint32_t cycle; 1245 #endif 1246 1153 1247 while( 1 ) 1154 1248 { … … 1161 1255 1162 1256 #if DEBUG_THREAD_IDLE 1163 { 1164 uint32_t cycle = (uint32_t)hal_get_cycles(); 1257 cycle = (uint32_t)hal_get_cycles(); 1165 1258 if( DEBUG_THREAD_IDLE < cycle ) 1166 1259 printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n", 1167 1260 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle ); 1168 }1169 1261 #endif 1170 1262 … … 1172 1264 1173 1265 #if DEBUG_THREAD_IDLE 1174 { 1175 uint32_t cycle = (uint32_t)hal_get_cycles(); 1266 cycle = (uint32_t)hal_get_cycles(); 1176 1267 if( DEBUG_THREAD_IDLE < cycle ) 1177 1268 printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n", 1178 1269 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle ); 1179 }1180 1270 #endif 1181 1271 … … 1183 1273 1184 1274 #if DEBUG_THREAD_IDLE 1185 { 1186 uint32_t cycle = (uint32_t)hal_get_cycles(); 1275 cycle = (uint32_t)hal_get_cycles(); 1187 1276 if( DEBUG_THREAD_IDLE < cycle ) 1188 1277 sched_display( CURRENT_THREAD->core->lid ); 1189 }1190 1278 #endif 1191 1279 // search a runable thread -
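A pattern worth noting in the thread.c hunk above: thread_user_create() and thread_user_fork() now acquire resources in a fixed order (descriptor, th_tbl[] slot, user stack vseg), and every error branch unwinds in exact reverse order. A condensed sketch of the convention, with all names taken from the hunk:

    thread = thread_alloc();                              // (1) descriptor
    if( thread == NULL ) return -1;

    thread->type = THREAD_USER;
    error = process_register_thread( process , thread , &trdid );
    if( error )                                           // (2) th_tbl[] slot
    {
        thread_destroy( thread );                         // undo (1)
        return -1;
    }
    thread->trdid = trdid;

    us_vseg = vmm_create_vseg( process , VSEG_TYPE_STACK ,
                               LTID_FROM_TRDID( trdid ) , // slot == ltid
                               0 , 0 , 0 , XPTR_NULL , local_cxy );
    if( us_vseg == NULL )                                 // (3) stack vseg
    {
        process_remove_thread( thread );                  // undo (2)
        thread_destroy( thread );                         // undo (1)
        return -1;
    }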
trunk/kernel/kern/thread.h
r619 r625 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 29 29 #include <shared_syscalls.h> 30 30 #include <hal_special.h> 31 #include <hal_kentry.h> 31 32 #include <xlist.h> 32 33 #include <list.h> … … 100 101 { 101 102 uint32_t pgfault_nr; /*! cumulated number of page fault */ 102 uint32_t sched_nr; /*! TODO ??? [AG] */103 uint32_t u_err_nr; /*! TODO ??? [AG] */104 uint32_t m_err_nr; /*! TODO ??? [AG] */105 103 cycle_t last_cycle; /*! last cycle counter value (date) */ 106 104 cycle_t usr_cycles; /*! user execution duration (cycles) */ … … 121 119 * 122 120 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the 123 * hal_kentry assembly code for some architectures (TSAR).121 * hal_kentry assembly code for the TSAR architectures. 124 122 * 125 123 * WARNING (2) Most of the thread state is private and accessed only by this thread, … … 165 163 uint32_t * ack_rsp_count; /*! pointer on acknowledge response counter */ 166 164 167 intptr_t u_stack_base; /*! user stack base address */ 168 uint32_t u_stack_size; /*! user stack size (bytes) */ 165 vseg_t * user_stack_vseg; /*! local pointer on user stack vseg */ 169 166 170 167 void * entry_func; /*! pointer on entry function */ … … 248 245 249 246 /*************************************************************************************** 250 * This function is used by the sys_fork() system call to create the "child" thread 251 * in the local cluster. It allocates memory for a thread descriptor, and initializes 252 * it from the "parent" thread descriptor defined by the <parent_thread_xp> argument. 247 * This function is used by the sys_fork() syscall to create the "child" main thread 248 * in the local cluster. It is called, generally through the RPC_PROCESS_MAKE_FORK, 249 * by the process_make_fork() function. It allocates memory from the local cluster 250 * for a "child" thread descriptor, and initializes it from the "parent" thread 251 * descriptor defined by the <parent_thread_xp> argument. 253 252 * The new thread is attached to the core that has the lowest load in local cluster. 254 253 * It is registered in the "child" process defined by the <child_process> argument. … … 259 258 * uses physical addressing on some architectures). 260 259 * The CPU and FPU execution contexts are created and linked to the new thread. 261 * but the actual context copy is NOT done, and must be done by by the sys_fork().260 * but the actual context copy is NOT done, and is done by the sys_fork() function. 262 261 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start. 263 262 *************************************************************************************** … … 273 272 /*************************************************************************************** 274 273 * This function is called by the process_make_exec() function to re-initialise the 275 * thread descriptor of the calling thread (that will become the new process main 276 * thread), and immediately jump to user code without returning to kentry!!! 274 * calling thread descriptor, that will become the new process main thread. 277 275 * It must be called by the main thread of the calling process. 276 * - The calling thread TRDID is not modified. 277 * - The kernel stack (currently in use) is not modified. 278 278 * - A new user stack vseg is created and initialised. 
279 * - The kernel stack (currently in use) is not modified.280 279 * - The function calls the hal_cpu_context_exec() to re-initialize the CPU context 281 * an jump to user code.280 * and the uzone registered in kernel stack, an jump to user code. 282 281 *************************************************************************************** 283 282 * @ entry_func : main thread entry point. … … 329 328 330 329 /*************************************************************************************** 331 * This low-level function is called by the sched_handle_signals() function to releases 332 * the physical memory allocated for a thread in a given cluster, when this thread 333 * is marked for delete. This include the thread descriptor itself, the associated 334 * CPU and FPU context, and the physical memory allocated for an user thread local stack. 330 * This low-level function is called by the sched_handle_signals() function when a 331 * thread is marked for delete. It removes the thread identified by the <thread> 332 * argument from the process th_tbl[], and releases all physical memory allocated for 333 * this. This includes the thread descriptor itself, the associated CPU and FPU context, 334 * and the physical memory allocated for an user thread stack. 335 335 *************************************************************************************** 336 336 * @ thread : pointer on the thread descriptor to release. 337 * @ return t rue, if the thread was the last registerd thread in local process.338 **************************************************************************************/ 339 voidthread_destroy( thread_t * thread );337 * @ return the number of threads registered in the process th_tbl[] before deletion. 338 **************************************************************************************/ 339 uint32_t thread_destroy( thread_t * thread ); 340 340 341 341 /*************************************************************************************** … … 383 383 * This function is used by the four sys_thread_cancel(), sys_thread_exit(), 384 384 * sys_kill() and sys_exit() system calls to mark for delete a given thread. 385 * It set the THREAD_BLOCKED_GLOBAL bit and set the the THREAD_FLAG_REQ_DELETE bit386 * in thethread descriptor identified by the <thread_xp> argument, to ask the scheduler385 * It set the THREAD_BLOCKED_GLOBAL bit and set the THREAD_FLAG_REQ_DELETE bit in the 386 * thread descriptor identified by the <thread_xp> argument, to ask the scheduler 387 387 * to asynchronously delete the target thread, at the next scheduling point. 388 * The calling thread can run in any cluster, as it uses remote accesses, but 389 * the target thread cannot be the main thread of the process identified by the <pid> 390 * argument, because the main thread must be deleted by the parent process argument. 388 * The calling thread can run in any cluster, as it uses remote accesses. 389 * This function makes a kernel panic if the target thread is the main thread, 390 * because * the main thread deletion will cause the process deletion, and a process 391 * must be deleted by the parent process, running the wait function. 391 392 * If the target thread is running in "attached" mode, and the <is_forced> argument 392 393 * is false, this function implements the required sychronisation with the joining -
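Since thread.h replaces the u_stack_base / u_stack_size pair with a single user_stack_vseg pointer, code that needs the old scalars must now derive them from the vseg bounds. A hedged sketch: the two helper names below are hypothetical, but the min/max arithmetic is the same one the old thread_user_create() used:

    // hypothetical accessors reconstructing the removed fields
    static inline intptr_t thread_user_stack_base( thread_t * thread )
    {
        return thread->user_stack_vseg->min;                 // lowest address
    }
    static inline uint32_t thread_user_stack_size( thread_t * thread )
    {
        return (uint32_t)( thread->user_stack_vseg->max
                         - thread->user_stack_vseg->min );   // length in bytes
    }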
trunk/kernel/kernel_config.h
r624 r625 26 26 #define _KERNEL_CONFIG_H_ 27 27 28 #define CONFIG_ALMOS_VERSION "Version 1.1 / October 2018"28 #define CONFIG_ALMOS_VERSION "Version 2.0 / April 2019" 29 29 30 30 //////////////////////////////////////////////////////////////////////////////////////////// … … 40 40 41 41 #define DEBUG_BUSYLOCK 0 42 #define DEBUG_BUSYLOCK_PID 0x10001 // thread pid (when detailed debug)43 #define DEBUG_BUSYLOCK_TRDID 0x10000 // thread trdid (when detailed debug)42 #define DEBUG_BUSYLOCK_PID 0x10001 // for busylock detailed debug 43 #define DEBUG_BUSYLOCK_TRDID 0x10000 // for busylock detailed debug 44 44 45 45 #define DEBUG_CHDEV_CMD_RX 0 … … 92 92 #define DEBUG_FATFS_UPDATE_DENTRY 0 93 93 94 #define DEBUG_HAL_CONTEXT 0 94 95 #define DEBUG_HAL_EXCEPTIONS 0 95 96 #define DEBUG_HAL_GPT_SET_PTE 0 … … 164 165 165 166 #define DEBUG_SCHED_HANDLE_SIGNALS 2 166 #define DEBUG_SCHED_YIELD 0 167 #define DEBUG_SCHED_YIELD 0 167 168 #define DEBUG_SCHED_RPC_ACTIVATE 0 168 169 … … 186 187 #define DEBUG_SYS_IS_FG 0 187 188 #define DEBUG_SYS_KILL 0 188 #define DEBUG_SYS_OPEN 0189 #define DEBUG_SYS_OPENDIR 0190 189 #define DEBUG_SYS_MKDIR 0 191 190 #define DEBUG_SYS_MMAP 0 192 191 #define DEBUG_SYS_MUNMAP 0 193 192 #define DEBUG_SYS_MUTEX 0 193 #define DEBUG_SYS_OPEN 0 194 #define DEBUG_SYS_OPENDIR 0 194 195 #define DEBUG_SYS_READ 0 195 196 #define DEBUG_SYS_READDIR 0 … … 230 231 #define DEBUG_VFS_INODE_CREATE 0 231 232 #define DEBUG_VFS_INODE_LOAD_ALL 0 233 #define DEBUG_VFS_KERNEL_MOVE 0 232 234 #define DEBUG_VFS_LINK 0 233 235 #define DEBUG_VFS_LOOKUP 0 234 236 #define DEBUG_VFS_LSEEK 0 235 237 #define DEBUG_VFS_MKDIR 0 236 #define DEBUG_VFS_NEW_ CHILD_INIT0238 #define DEBUG_VFS_NEW_DENTRY_INIT 0 237 239 #define DEBUG_VFS_OPEN 0 238 240 #define DEBUG_VFS_OPENDIR 0 239 241 #define DEBUG_VFS_STAT 0 242 #define DEBUG_VFS_USER_MOVE 0 240 243 #define DEBUG_VFS_UNLINK 0 241 244 … … 248 251 #define DEBUG_VMM_HANDLE_PAGE_FAULT 0 249 252 #define DEBUG_VMM_HANDLE_COW 0 250 #define DEBUG_VMM_INIT 0251 253 #define DEBUG_VMM_MMAP_ALLOC 0 252 254 #define DEBUG_VMM_PAGE_ALLOCATE 0 255 #define DEBUG_VMM_REMOVE_VSEG 0 253 256 #define DEBUG_VMM_RESIZE_VSEG 0 254 257 #define DEBUG_VMM_SET_COW 0 255 258 #define DEBUG_VMM_UPDATE_PTE 0 259 #define DEBUG_VMM_USER_INIT 0 260 #define DEBUG_VMM_USER_RESET 0 256 261 257 262 #define DEBUG_XHTAB 0 … … 421 426 #define CONFIG_VMM_ARGS_SIZE 0x000004 // args vseg size : 16 Kbytes 422 427 #define CONFIG_VMM_ENVS_SIZE 0x000008 // envs vseg size : 32 Kbytes 423 #define CONFIG_VMM_STACK_SIZE 0x00 0100 // single stack vseg size : 1Mbytes428 #define CONFIG_VMM_STACK_SIZE 0x001000 // single stack vseg size : 16 Mbytes 424 429 425 430 //////////////////////////////////////////////////////////////////////////////////////////// -
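All DEBUG_* symbols in kernel_config.h share one convention, visible throughout this changeset: the value is a cycle threshold, not a boolean. 0 compiles the trace out, and a non-zero value N enables the trace only once the cycle counter has passed N (DEBUG_SCHED_HANDLE_SIGNALS is set to 2 above, i.e. traced from boot). The idiom, exactly as used in thread.c:

    #if DEBUG_THREAD_DESTROY
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( DEBUG_THREAD_DESTROY < cycle )       // fires only after threshold cycle
    printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, cycle );
    #endif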
trunk/kernel/libk/elf.c
r603 r625 196 196 197 197 #if DEBUG_ELF_LOAD 198 uint32_t cycle = (uint32_t)hal_get_cycles(); 199 if( DEBUG_ELF_LOAD < cycle ) 200 printk("\n[%s] found %s vseg / base %x / size %x\n" 198 199 uint32_t cycle = (uint32_t)hal_get_cycles(); 200 thread_t * this = CURRENT_THREAD; 201 if( DEBUG_ELF_LOAD < cycle ) 202 printk("\n[%s] thread[%x,%x] found %s vseg / base %x / size %x\n" 201 202 " file_size %x / file_offset %x / mapper_xp %l / cycle %d\n", 202 __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 203 204 __FUNCTION__ , this->process->pid, this->trdid, 204 205 vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 203 205 vseg->file_size , vseg->file_offset , vseg->mapper_xp ); 206 #endif -
trunk/kernel/libk/elf.h
r457 r625 228 228 229 229 /**************************************************************************************** 230 * This function registers in VMM of the process identified by the <process> argument231 * the CODE and DATA vsegs defined in the .elf openfile descriptor <file_xp>.230 * This function registers in the VSL of the process identified by the <process> 231 * argument the CODE and DATA vsegs defined in the .elf file descriptor <file_xp>. 232 232 * The segments are not loaded in memory. 233 233 * It also registers the process entry point in VMM. -
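Because the segments are registered but not loaded, the first access to a CODE or DATA vseg after elf_load_process() returns must be served by demand paging. A hedged usage sketch: the prototype is not shown in this hunk, so the two arguments simply follow the <file_xp> and <process> names in the comment:

    // register the CODE and DATA vsegs of an executable: no page is mapped yet
    if( elf_load_process( file_xp , process ) )
    {
        printk("\n[ERROR] cannot register CODE/DATA vsegs\n");
    }
    // the first instruction fetch then raises a page fault, and the missing
    // page is fetched from the file mapper by the VMM page fault handler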
trunk/kernel/libk/remote_rwlock.c
r623 r625 251 251 thread_t * thread_ptr = GET_PTR( thread_xp ); 252 252 253 printk("\n@@@ in %s : release first waiting writer[%x,%x]\n", 254 __FUNCTION__, thread_cxy, thread_ptr ); 255 253 256 // remove this waiting thread from waiting list 254 257 xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) ); -
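The printk added above fires in the rwlock release path, when the first waiting writer is given priority over waiting readers. A hedged reconstruction of the surrounding logic: only the pointer extraction and the xlist_unlink() call are visible in the hunk; the guard and the unblock call are assumptions:

    // release path: wake the first waiting writer, if any
    if( xlist_is_empty( root_xp ) == false )               // assumed guard
    {
        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
        cxy_t      thread_cxy = GET_CXY( thread_xp );
        thread_t * thread_ptr = GET_PTR( thread_xp );

        // remove this waiting thread from waiting list
        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );

        // assumed: unblock it so its scheduler can elect it again
        thread_unblock( thread_xp , THREAD_BLOCKED_LOCK );
    }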
trunk/kernel/mm/mapper.c
r624 r625 153 153 154 154 #if DEBUG_MAPPER_GET_PAGE 155 uint32_t cycle = (uint32_t)hal_get_cycles(); 155 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 156 uint32_t cycle = (uint32_t)hal_get_cycles(); 156 157 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 157 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 158 vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name ); 159 if( DEBUG_MAPPER_GET_PAGE < cycle ) 160 printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n", 161 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); 158 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) // FAT mapper 159 { 160 printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n", 161 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 162 } 163 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) // file mapper 164 { 165 vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name ); 166 printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n", 167 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); 168 } 162 169 #endif 163 170 … … 235 242 #if DEBUG_MAPPER_GET_PAGE 236 243 cycle = (uint32_t)hal_get_cycles(); 237 if( DEBUG_MAPPER_GET_PAGE < cycle ) 238 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n", 239 __FUNCTION__, this->process->pid, this->trdid, 240 page_id, name, ppm_page2ppn( page_xp ), cycle ); 244 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) 245 { 246 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n", 247 __FUNCTION__, this->process->pid, this->trdid, page_id, 248 name, ppm_page2ppn(page_xp), cycle ); 249 } 250 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) 251 { 252 printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x / cycle %d\n", 253 __FUNCTION__, this->process->pid, this->trdid, page_id, 254 ppm_page2ppn(page_xp), cycle ); 255 } 241 256 #endif 242 257 … … 257 272 258 273 #if DEBUG_MAPPER_HANDLE_MISS 259 uint32_t cycle = (uint32_t)hal_get_cycles();274 uint32_t cycle = (uint32_t)hal_get_cycles(); 260 275 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 261 276 vfs_inode_t * inode = mapper->inode; 262 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 263 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 264 printk("\n[%s] enter for page %d in <%s> / cycle %d", 265 __FUNCTION__, page_id, name, cycle ); 266 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) 267 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name ); 277 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 278 { 279 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 280 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d", 281 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); 282 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name ); 283 } 284 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 285 { 286 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d", 287 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 288 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 289 } 268 290 #endif 269 291 … … 321 343 #if DEBUG_MAPPER_HANDLE_MISS 322 344 cycle = (uint32_t)hal_get_cycles(); 323 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 324 printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d", 325 __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle ); 326 if( 
DEBUG_MAPPER_HANDLE_MISS & 1 ) 327 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name ); 345 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 346 { 347 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 348 __FUNCTION__, this->process->pid, this->trdid, 349 page_id, name, ppm_page2ppn( *page_xp ), cycle ); 350 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 351 } 352 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 353 { 354 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 355 __FUNCTION__, this->process->pid, this->trdid, 356 page_id, ppm_page2ppn( *page_xp ), cycle ); 357 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 358 } 328 359 #endif 329 360 … … 482 513 483 514 #if DEBUG_MAPPER_MOVE_KERNEL 484 uint32_t cycle = (uint32_t)hal_get_cycles(); 485 thread_t * this = CURRENT_THREAD; 515 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 516 uint32_t cycle = (uint32_t)hal_get_cycles(); 517 thread_t * this = CURRENT_THREAD; 518 mapper_t * mapper = GET_PTR( mapper_xp ); 519 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) ); 520 vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name ); 486 521 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 487 printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x/ cycle %d\n",488 __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );522 printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n", 523 __FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle ); 489 524 #endif 490 525 … … 496 531 uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT; 497 532 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT; 498 499 #if (DEBUG_MAPPER_MOVE_KERNEL & 1)500 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )501 printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );502 #endif503 533 504 534 // compute source and destination clusters … … 528 558 else if ( page_id == last ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1; 529 559 else page_count = CONFIG_PPM_PAGE_SIZE; 530 531 #if (DEBUG_MAPPER_MOVE_KERNEL & 1)532 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )533 printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",534 __FUNCTION__ , page_id , page_offset , page_count );535 #endif536 560 537 561 // get extended pointer on page descriptor … … 560 584 #if (DEBUG_MAPPER_MOVE_KERNEL & 1) 561 585 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 562 printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n", 563 __FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr ); 586 { 587 if( to_buffer ) 588 printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n", 589 __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_count ); 590 else 591 printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n", 592 __FUNCTION__, src_cxy, src_ptr, name, page_id, page_count ); 593 } 564 594 #endif 565 595 … … 571 601 572 602 #if DEBUG_MAPPER_MOVE_KERNEL 573 cycle = (uint32_t)hal_get_cycles();603 cycle = (uint32_t)hal_get_cycles(); 574 604 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 575 printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x /cycle %d\n",576 __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr,cycle );605 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 606 __FUNCTION__, this->process->pid, this->trdid, cycle ); 577 607 #endif 578 608 … … 662 692 663 693 // get pointer 
on radix tree 664 rt 694 rt = &mapper->rt; 665 695 666 696 // initialise loop variable … … 675 705 if( page == NULL ) break; 676 706 677 assert( (page->index == found_key ), __FUNCTION__,"wrong page descriptor index" );678 assert( (page->order == 0), __FUNCTION__,"mapper page order must be 0" );707 assert( (page->index == found_key ), "wrong page descriptor index" ); 708 assert( (page->order == 0), "mapper page order must be 0" ); 679 709 680 710 // build extended pointer on page descriptor … … 730 760 char buffer[4096]; // local buffer 731 761 uint32_t * tabi; // pointer on uint32_t to scan buffer 732 char * tabc; // pointer on char to scan buffer733 762 uint32_t line; // line index 734 763 uint32_t word; // word index 735 uint32_t n; // char index736 764 cxy_t mapper_cxy; // mapper cluster identifier 737 765 mapper_t * mapper_ptr; // mapper local pointer … … 776 804 // display 8 words per line 777 805 tabi = (uint32_t *)buffer; 778 tabc = (char *)buffer;779 806 printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id ); 780 807 for( line = 0 ; line < (nbytes >> 5) ; line++ ) 781 808 { 782 printk("%X : ", line );809 printk("%X : ", line << 5 ); 783 810 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] ); 784 printk(" | ");785 for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );786 811 printk("\n"); 787 812 } -
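The last hunk above fixes the dump's address column: with eight 4-byte words per line, each line covers 32 bytes, so the printed offset is line << 5; the old code printed the bare line index (and a character column that has been dropped). The corrected loop, condensed from the hunk:

    for( line = 0 ; line < (nbytes >> 5) ; line++ )    // 32 bytes per line
    {
        printk("%X : ", line << 5 );                   // byte offset, not index
        for( word = 0 ; word < 8 ; word++ )
        printk("%X ", tabi[(line<<3) + word] );        // 8 words per line
        printk("\n");
    }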
trunk/kernel/mm/mapper.h
r623 r625 123 123 124 124 /******************************************************************************************* 125 * This function move data between a remote mapper, dentified by the <mapper_xp> argument,125 * This function move data between a remote mapper, identified by the <mapper_xp> argument, 126 126 * and a distributed user buffer. It can be called by a thread running in any cluster. 127 127 * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls. … … 148 148 149 149 /******************************************************************************************** 150 * This function move data between a remote mapper and a remote kernel buffer.151 * It can be called by a thread running any cluster.150 * This function move data between a remote mapper, identified by the <mapper_xp> argument, 151 * and a localised remote kernel buffer. It can be called by a thread running any cluster. 152 152 * If required, the data transfer is split in "fragments", where one fragment contains 153 153 * contiguous bytes in the same mapper page. … … 215 215 /******************************************************************************************* 216 216 * This function allows to write a single word to a mapper seen as and array of uint32_t. 217 * It has bee designed to support remote access tho the FAT mapper of the FATFS.217 * It has been designed to support remote access to the FAT mapper of the FATFS. 218 218 * It can be called by any thread running in any cluster. 219 219 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing -
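The fragment rule stated above (one fragment = contiguous bytes within a single mapper page) matches the page_count computation in the mapper.c hunk. A hedged reconstruction: only the tail and middle branches are visible there, the first two branches are inferred:

    // bytes handled in page <page_id> of a [min_byte, max_byte] transfer
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t page_count;

    if( first == last )          // whole transfer inside one page
    page_count = size;
    else if( page_id == first )  // head fragment: offset to end of page
    page_count = CONFIG_PPM_PAGE_SIZE - (min_byte & CONFIG_PPM_PAGE_MASK);
    else if( page_id == last )   // tail fragment: start of page to max_byte
    page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
    else                         // middle fragment: one full page
    page_count = CONFIG_PPM_PAGE_SIZE;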
trunk/kernel/mm/page.h
r623 r625 50 50 * test/modify the forks counter or the page flags. 51 51 * - The list entry is used to register the page in a free list or in dirty list. 52 * NOTE: Size is 48 bytes for a 32 bits core.53 * TODO : the refcount use is not defined [AG]52 * The refcount is used for page release to KMEM. 53 * NOTE: the size is 48 bytes for a 32 bits core. 54 54 ************************************************************************************/ 55 55 … … 61 61 uint32_t index; /*! page index in mapper (4) */ 62 62 list_entry_t list; /*! for both dirty pages and free pages (8) */ 63 uint32_t refcount; /*! reference counter TODO ??? [AG](4) */63 int32_t refcount; /*! references counter for page release (4) */ 64 64 uint32_t forks; /*! number of pending forks (4) */ 65 65 remote_busylock_t lock; /*! protect forks or flags modifs (16) */ -
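The forks field documented above is what the fork path in thread.c increments for every page it shares copy-on-write, under the page busylock. The pattern, as used in that hunk (the atomic add itself is elided by the diff context; hal_remote_atomic_add() is the assumed primitive):

    // build extended pointers on forks and lock fields
    xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    remote_busylock_acquire( lock_xp );      // take lock protecting page
    hal_remote_atomic_add( forks_xp , 1 );   // assumed: one more pending fork
    remote_busylock_release( lock_xp );      // release lock protecting page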
trunk/kernel/mm/ppm.c
r611 r625 349 349 } // end ppm_free_pages() 350 350 351 //////////////////////// ///////352 void ppm_ print( char * string)351 //////////////////////// 352 void ppm_display( void ) 353 353 { 354 354 uint32_t order; … … 361 361 busylock_acquire( &ppm->free_lock ); 362 362 363 printk("\n*** PPM in cluster %x / %s / %d pages ***\n", 364 local_cxy , string, ppm->pages_nr ); 363 printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr ); 365 364 366 365 for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ ) -
trunk/kernel/mm/ppm.h
r623 r625 176 176 * string : character string printed in header 177 177 ****************************************************************************************/ 178 void ppm_print( char * string ); 178 void ppm_display( void ); 179 179 180 180 /***************************************************************************************** -
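Callers of the old ppm_print() must drop their argument: ppm_display() builds its header internally from local_cxy and pages_nr. Note that the "string : character string printed in header" line left in the comment above no longer matches the void prototype. Before/after (the string below is only illustrative):

    ppm_print( "after kernel_init" );   // r623: caller-supplied header string
    ppm_display();                      // r625: header built internally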
trunk/kernel/mm/vmm.c
r624 r625 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018 )6 * Alain Greiner (2016,2017,2018,2019) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 55 55 extern process_t process_zero; // allocated in cluster.c 56 56 57 /////////////////////////////////////// 58 error_t vmm_init( process_t * process ) 57 //////////////////////////////////////////////////////////////////////////////////////////// 58 // This static function is called by the vmm_create_vseg() function, and implements 59 // the VMM STACK specific allocator. 60 //////////////////////////////////////////////////////////////////////////////////////////// 61 // @ vmm : [in] pointer on VMM. 62 // @ ltid : [in] requested slot == local user thread identifier. 63 // @ vpn_base : [out] first allocated page 64 // @ vpn_size : [out] number of allocated pages 65 //////////////////////////////////////////////////////////////////////////////////////////// 66 static void vmm_stack_alloc( vmm_t * vmm, 67 ltid_t ltid, 68 vpn_t * vpn_base, 69 vpn_t * vpn_size ) 59 70 { 60 error_t error; 71 72 // check ltid argument 73 assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), 74 "slot index %d too large for an user stack vseg", ltid ); 75 76 // get stack allocator pointer 77 stack_mgr_t * mgr = &vmm->stack_mgr; 78 79 // get lock on stack allocator 80 busylock_acquire( &mgr->lock ); 81 82 // check requested slot is available 83 assert( (bitmap_state( &mgr->bitmap , ltid ) == false), 84 "slot index %d already allocated", ltid ); 85 86 // update bitmap 87 bitmap_set( &mgr->bitmap , ltid ); 88 89 // release lock on stack allocator 90 busylock_release( &mgr->lock ); 91 92 // returns vpn_base, vpn_size (first page non allocated) 93 *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1; 94 *vpn_size = CONFIG_VMM_STACK_SIZE - 1; 95 96 } // end vmm_stack_alloc() 97 98 //////////////////////////////////////////////////////////////////////////////////////////// 99 // This static function is called by the vmm_remove_vseg() function, and implements 100 // the VMM STACK specific desallocator. 101 //////////////////////////////////////////////////////////////////////////////////////////// 102 // @ vmm : [in] pointer on VMM. 103 // @ vseg : [in] pointer on released vseg. 
104 //////////////////////////////////////////////////////////////////////////////////////////// 105 static void vmm_stack_free( vmm_t * vmm, 106 vseg_t * vseg ) 107 { 108 // get stack allocator pointer 109 stack_mgr_t * mgr = &vmm->stack_mgr; 110 111 // compute slot index 112 uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE; 113 114 // check index 115 assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), 116 "slot index %d too large for an user stack vseg", index ); 117 118 // check released slot is allocated 119 assert( (bitmap_state( &mgr->bitmap , index ) == true), 120 "released slot index %d non allocated", index ); 121 122 // get lock on stack allocator 123 busylock_acquire( &mgr->lock ); 124 125 // update stacks_bitmap 126 bitmap_clear( &mgr->bitmap , index ); 127 128 // release lock on stack allocator 129 busylock_release( &mgr->lock ); 130 131 } // end vmm_stack_free() 132 133 //////////////////////////////////////////////////////////////////////////////////////////// 134 // This static function is called by the vmm_create_vseg() function, and implements 135 // the VMM MMAP specific allocator. 136 //////////////////////////////////////////////////////////////////////////////////////////// 137 // @ vmm : [in] pointer on VMM. 138 // @ npages : [in] requested number of pages. 139 // @ vpn_base : [out] first allocated page. 140 // @ vpn_size : [out] actual number of allocated pages. 141 //////////////////////////////////////////////////////////////////////////////////////////// 142 static error_t vmm_mmap_alloc( vmm_t * vmm, 143 vpn_t npages, 144 vpn_t * vpn_base, 145 vpn_t * vpn_size ) 146 { 147 uint32_t order; 148 xptr_t vseg_xp; 149 vseg_t * vseg; 150 vpn_t base; 151 vpn_t size; 152 vpn_t free; 153 154 #if DEBUG_VMM_MMAP_ALLOC 155 thread_t * this = CURRENT_THREAD; 156 uint32_t cycle = (uint32_t)hal_get_cycles(); 157 if( DEBUG_VMM_MMAP_ALLOC < cycle ) 158 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 159 __FUNCTION__, this->process->pid, this->trdid, cycle ); 160 #endif 161 162 // number of allocated pages must be power of 2 163 // compute actual size and order 164 size = POW2_ROUNDUP( npages ); 165 order = bits_log2( size ); 166 167 // get mmap allocator pointer 168 mmap_mgr_t * mgr = &vmm->mmap_mgr; 169 170 // build extended pointer on root of zombi_list[order] 171 xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] ); 172 173 // take lock protecting zombi_lists 174 busylock_acquire( &mgr->lock ); 175 176 // get vseg from zombi_list or from mmap zone 177 if( xlist_is_empty( root_xp ) ) // from mmap zone 178 { 179 // check overflow 180 free = mgr->first_free_vpn; 181 if( (free + size) > mgr->vpn_size ) return -1; 182 183 // update MMAP allocator 184 mgr->first_free_vpn += size; 185 186 // compute base 187 base = free; 188 } 189 else // from zombi_list 190 { 191 // get pointer on zombi vseg from zombi_list 192 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); 193 vseg = GET_PTR( vseg_xp ); 194 195 // remove vseg from free-list 196 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 197 198 // compute base 199 base = vseg->vpn_base; 200 } 201 202 // release lock 203 busylock_release( &mgr->lock ); 204 205 #if DEBUG_VMM_MMAP_ALLOC 206 cycle = (uint32_t)hal_get_cycles(); 207 if( DEBUG_VMM_DESTROY < cycle ) 208 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n", 209 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle ); 210 #endif 211 212 // returns vpn_base, vpn_size 
213 *vpn_base = base;
214 *vpn_size = size;
215 return 0;
216
217 } // end vmm_mmap_alloc()
218
219 ////////////////////////////////////////////////////////////////////////////////////////////
220 // This static function is called by the vmm_remove_vseg() function, and implements
221 // the VMM MMAP specific deallocator.
222 ////////////////////////////////////////////////////////////////////////////////////////////
223 // @ vmm : [in] pointer on VMM.
224 // @ vseg : [in] pointer on released vseg.
225 ////////////////////////////////////////////////////////////////////////////////////////////
226 static void vmm_mmap_free( vmm_t * vmm,
227 vseg_t * vseg )
228 {
229 // get pointer on mmap allocator
230 mmap_mgr_t * mgr = &vmm->mmap_mgr;
231
232 // compute zombi_list order
233 uint32_t order = bits_log2( vseg->vpn_size );
234
235 // take lock protecting zombi lists
236 busylock_acquire( &mgr->lock );
237
238 // update relevant zombi_list
239 xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
240 XPTR( local_cxy , &vseg->xlist ) );
241
242 // release lock
243 busylock_release( &mgr->lock );
244
245 } // end of vmm_mmap_free()
246
247 ////////////////////////////////////////////////////////////////////////////////////////////
248 // This static function registers one vseg in the VSL of a local process descriptor.
249 ////////////////////////////////////////////////////////////////////////////////////////////
250 // vmm : [in] pointer on VMM.
251 // vseg : [in] pointer on vseg.
252 ////////////////////////////////////////////////////////////////////////////////////////////
253 void vmm_attach_vseg_to_vsl( vmm_t * vmm,
254 vseg_t * vseg )
255 {
256 // update vseg descriptor
257 vseg->vmm = vmm;
258
259 // increment vsegs number
260 vmm->vsegs_nr++;
261
262 // add vseg in vmm list
263 xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
264 XPTR( local_cxy , &vseg->xlist ) );
265
266 } // end vmm_attach_vseg_to_vsl()
267
268 ////////////////////////////////////////////////////////////////////////////////////////////
269 // This static function removes one vseg from the VSL of a local process descriptor.
270 ////////////////////////////////////////////////////////////////////////////////////////////
271 // vmm : [in] pointer on VMM.
272 // vseg : [in] pointer on vseg.
273 ////////////////////////////////////////////////////////////////////////////////////////////
274 void vmm_detach_vseg_from_vsl( vmm_t * vmm,
275 vseg_t * vseg )
276 {
277 // update vseg descriptor
278 vseg->vmm = NULL;
279
280 // decrement vsegs number
281 vmm->vsegs_nr--;
282
283 // remove vseg from VSL
284 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
285
286 } // end vmm_detach_vseg_from_vsl()
287
288
289
290
291 ////////////////////////////////////////////
292 error_t vmm_user_init( process_t * process )
293 {
61 294 vseg_t * vseg_args;
62 295 vseg_t * vseg_envs;
… …
65 298 uint32_t i;
66 299
67 #if DEBUG_VMM_INIT
300 #if DEBUG_VMM_USER_INIT
68 301 thread_t * this = CURRENT_THREAD;
69 302 uint32_t cycle = (uint32_t)hal_get_cycles();
70 if( DEBUG_VMM_INIT )
303 if( DEBUG_VMM_USER_INIT )
71 304 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
72 305 __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
… …
76 309 vmm_t * vmm = &process->vmm;
77 310
78 // initialize VSL (empty)
79 vmm->vsegs_nr = 0;
80 xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
81 remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
82
311 // check UTILS zone
83 312 assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
84 313 (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
85 314 "UTILS zone too small\n" );
86 315
316 // check STACK zone
87 317 assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
88 318 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
89 319 "STACK zone too small\n");
90 320
91 // register args vseg in VSL
321 // register "args" vseg in VSL
92 322 base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
93 323 size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
… …
101 331 XPTR_NULL, // mapper_xp unused
102 332 local_cxy );
103
104 333 if( vseg_args == NULL )
105 334 {
… …
110 339 vmm->args_vpn_base = base;
111 340
112 // register the envs vseg in VSL
341 // register "envs" vseg in VSL
113 342 base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
114 343 size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
… …
122 351 XPTR_NULL, // mapper_xp unused
123 352 local_cxy );
124
125 353 if( vseg_envs == NULL )
126 354 {
… …
130 358
131 359 vmm->envs_vpn_base = base;
132
133 // create GPT (empty)
134 error = hal_gpt_create( &vmm->gpt );
135
136 if( error )
137 {
138 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
139 return -1;
140 }
141
142 // initialize GPT lock
143 remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
144
145 // update process VMM with kernel vsegs as required by the hardware architecture
146 error = hal_vmm_kernel_update( process );
147
148 if( error )
149 {
150 printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
151 return -1;
152 }
153
360
154 361 // initialize STACK allocator
… …
162 369 vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
163 370 busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
164 for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
371 for( i = 0 ; i < 32 ; i++ )
372 {
373 xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
374 }
165 375
166 376 // initialize instrumentation counters
… …
169 379 hal_fence();
170 380
171 #if DEBUG_VMM_INIT
381 #if DEBUG_VMM_USER_INIT
172 382 cycle = (uint32_t)hal_get_cycles();
173 if( DEBUG_VMM_INIT )
383 if( DEBUG_VMM_USER_INIT )
174 384 printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x /
cycle %d\n", 175 385 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); … … 178 388 return 0; 179 389 180 } // end vmm_init() 181 390 } // end vmm_user_init() 182 391 183 392 ////////////////////////////////////////// 184 void vmm_attach_vseg_to_vsl( vmm_t * vmm, 185 vseg_t * vseg ) 393 void vmm_user_reset( process_t * process ) 186 394 { 187 // build extended pointer on rwlock protecting VSL 188 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 189 190 // get rwlock in write mode 191 remote_rwlock_wr_acquire( lock_xp ); 192 193 // update vseg descriptor 194 vseg->vmm = vmm; 195 196 // increment vsegs number 197 vmm->vsegs_nr++; 198 199 // add vseg in vmm list 200 xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), 201 XPTR( local_cxy , &vseg->xlist ) ); 202 203 // release rwlock in write mode 204 remote_rwlock_wr_release( lock_xp ); 205 } 206 207 //////////////////////////////////////////// 208 void vmm_detach_vseg_from_vsl( vmm_t * vmm, 209 vseg_t * vseg ) 210 { 211 // get vseg type 212 uint32_t type = vseg->type; 213 214 // build extended pointer on rwlock protecting VSL 215 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 216 217 // get rwlock in write mode 218 remote_rwlock_wr_acquire( lock_xp ); 219 220 // update vseg descriptor 221 vseg->vmm = NULL; 222 223 // remove vseg from VSL 224 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 225 226 // release rwlock in write mode 227 remote_rwlock_wr_release( lock_xp ); 228 229 // release the stack slot to VMM stack allocator if STACK type 230 if( type == VSEG_TYPE_STACK ) 231 { 232 // get pointer on stack allocator 233 stack_mgr_t * mgr = &vmm->stack_mgr; 234 235 // compute slot index 236 uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); 237 238 // update stacks_bitmap 239 busylock_acquire( &mgr->lock ); 240 bitmap_clear( &mgr->bitmap , index ); 241 busylock_release( &mgr->lock ); 242 } 243 244 // release the vseg to VMM mmap allocator if MMAP type 245 if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) 246 { 247 // get pointer on mmap allocator 248 mmap_mgr_t * mgr = &vmm->mmap_mgr; 249 250 // compute zombi_list index 251 uint32_t index = bits_log2( vseg->vpn_size ); 252 253 // update zombi_list 254 busylock_acquire( &mgr->lock ); 255 list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); 256 busylock_release( &mgr->lock ); 257 } 258 259 // release physical memory allocated for vseg if no MMAP and no kernel type 260 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) && 261 (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) 262 { 263 vseg_free( vseg ); 264 } 265 266 } // end vmm_remove_vseg_from_vsl() 395 xptr_t vseg_xp; 396 vseg_t * vseg; 397 vseg_type_t vseg_type; 398 399 #if DEBUG_VMM_USER_RESET 400 uint32_t cycle = (uint32_t)hal_get_cycles(); 401 thread_t * this = CURRENT_THREAD; 402 if( DEBUG_VMM_USER_RESET < cycle ) 403 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 404 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); 405 #endif 406 407 #if (DEBUG_VMM_USER_RESET & 1 ) 408 if( DEBUG_VMM_USER_RESET < cycle ) 409 hal_vmm_display( process , true ); 410 #endif 411 412 // get pointer on local VMM 413 vmm_t * vmm = &process->vmm; 414 415 // build extended pointer on VSL root and VSL lock 416 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 417 xptr_t lock_xp = XPTR( local_cxy , 
&vmm->vsl_lock );
418
419 // take the VSL lock
420 remote_rwlock_wr_acquire( lock_xp );
421
422 // scan the VSL to delete all non kernel vsegs
423 // (we don't use a FOREACH in case of item deletion)
424 xptr_t iter_xp;
425 xptr_t next_xp;
426 for( iter_xp = hal_remote_l64( root_xp ) ;
427 iter_xp != root_xp ;
428 iter_xp = next_xp )
429 {
430 // save extended pointer on next item in xlist
431 next_xp = hal_remote_l64( iter_xp );
432
433 // get pointers on current vseg in VSL
434 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
435 vseg = GET_PTR( vseg_xp );
436 vseg_type = vseg->type;
437
438 #if( DEBUG_VMM_USER_RESET & 1 )
439 if( DEBUG_VMM_USER_RESET < cycle )
440 printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
441 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
442 #endif
443 // delete non kernel vseg
444 if( (vseg_type != VSEG_TYPE_KCODE) &&
445 (vseg_type != VSEG_TYPE_KDATA) &&
446 (vseg_type != VSEG_TYPE_KDEV ) )
447 {
448 // remove vseg from VSL
449 vmm_remove_vseg( process , vseg );
450
451 #if( DEBUG_VMM_USER_RESET & 1 )
452 if( DEBUG_VMM_USER_RESET < cycle )
453 printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
454 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
455 #endif
456 }
457 else
458 {
459
460 #if( DEBUG_VMM_USER_RESET & 1 )
461 if( DEBUG_VMM_USER_RESET < cycle )
462 printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
463 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
464 #endif
465 }
466 } // end loop on vsegs in VSL
467
468 // release the VSL lock
469 remote_rwlock_wr_release( lock_xp );
470
471 // FIXME: the process copies must also be handled here...
472
473 #if DEBUG_VMM_USER_RESET
474 cycle = (uint32_t)hal_get_cycles();
475 if( DEBUG_VMM_USER_RESET < cycle )
476 printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
477 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
478 #endif
479
480 } // end vmm_user_reset()
267 481
268 482 ////////////////////////////////////////////////
… …
507 721 cxy_t page_cxy;
508 722 xptr_t forks_xp; // extended pointer on forks counter in page descriptor
509 xptr_t lock_xp; // extended pointer on lock protecting the forks counter
510 723 xptr_t parent_root_xp;
511 724 bool_t mapped;
… …
528 741 child_vmm = &child_process->vmm;
529 742
530 // get extended pointer on lock protecting the parent VSL
531 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );
532
533 // initialize the lock protecting the child VSL
534 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
743 // initialize the locks protecting the child VSL and GPT
744 remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
745 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
535 746
536 747 // initialize the child VSL as empty
… …
538 749 child_vmm->vsegs_nr = 0;
539 750
540 // create the child GPT
751 // create an empty child GPT
541 752 error = hal_gpt_create( &child_vmm->gpt );
542
543 753 if( error )
544 754 {
… …
547 757 }
548 758
549 // build extended pointer on parent VSL
759 // build extended pointer on parent VSL root and lock
550 760 parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
761 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
551 762
552 763 // take the lock protecting the parent VSL in read mode
… …
556 767 XLIST_FOREACH( parent_root_xp , iter_xp )
557 768 {
558 // get local and
extended pointers on current parent vseg
769 // get pointers on current parent vseg
559 770 parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
560 771 parent_vseg = GET_PTR( parent_vseg_xp );
… …
587 798 vseg_init_from_ref( child_vseg , parent_vseg_xp );
588 799
800 // build extended pointer on VSL lock
801 xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
802
803 // take the VSL lock in write mode
804 remote_rwlock_wr_acquire( lock_xp );
805
589 806 // register child vseg in child VSL
590 807 vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
808
809 // release the VSL lock
810 remote_rwlock_wr_release( lock_xp );
591 811
592 812 #if DEBUG_VMM_FORK_COPY
… …
597 817 hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
598 818 #endif
599
600 // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
819 // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
601 820 if( type != VSEG_TYPE_CODE )
602 821 {
603 // activate the COW for DATA, MMAP, REMOTE vsegs only
822 // activate the COW for DATA, ANON, REMOTE vsegs only
604 823 cow = ( type != VSEG_TYPE_FILE );
605 824
… …
611 830 {
612 831 error = hal_gpt_pte_copy( &child_vmm->gpt,
832 vpn,
613 833 XPTR( parent_cxy , &parent_vmm->gpt ),
614 834 vpn,
… …
677 897 child_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
678 898 child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
679 for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );
899 for( i = 0 ; i < 32 ; i++ )
900 {
901 xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
902 }
680 903
681 904 // initialize instrumentation counters
… …
726 949 vmm_t * vmm = &process->vmm;
727 950
728 // get extended pointer on VSL root and VSL lock
729 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
951 // build extended pointer on VSL root, VSL lock and GPT lock
952 xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
953 xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
954 xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
955
956 // take the VSL lock
957 remote_rwlock_wr_acquire( vsl_lock_xp );
730 958
731 959 // scan the VSL to delete all registered vsegs
732 // (don't use a FOREACH for item deletion in xlist)
733
734 while( !xlist_is_empty( root_xp ) )
960 // (we don't use a FOREACH in case of item deletion)
961 xptr_t iter_xp;
962 xptr_t next_xp;
963 for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
964 iter_xp != vsl_root_xp ;
965 iter_xp = next_xp )
735 966 {
736 // get pointer on first vseg in VSL
737 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
738 vseg = GET_PTR( vseg_xp );
967 // save extended pointer on next item in xlist
968 next_xp = hal_remote_l64( iter_xp );
969
970 // get pointers on current vseg in VSL
971 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
972 vseg = GET_PTR( vseg_xp );
739 973
740 974 // delete vseg and release physical pages
741 vmm_delete_vseg( process->pid , vseg->min );
975 vmm_remove_vseg( process , vseg );
742 976
743 977 #if( DEBUG_VMM_DESTROY & 1 )
… …
749 983 }
750 984
751 // remove all vsegs from zombi_lists in MMAP allocator
985 // release the VSL lock
986 remote_rwlock_wr_release( vsl_lock_xp );
987
988 // remove all registered MMAP vsegs
989 // from zombi_lists in MMAP allocator
752 990 uint32_t i;
753 991 for( i = 0 ; i<32 ; i++ )
754 992 {
755 while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
993 // build extended pointer on zombi_list[i]
994 xptr_t root_xp = XPTR( local_cxy ,
&vmm->mmap_mgr.zombi_list[i] ); 995 996 // scan zombi_list[i] 997 while( !xlist_is_empty( root_xp ) ) 756 998 { 757 vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist ); 999 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); 1000 vseg = GET_PTR( vseg_xp ); 758 1001 759 1002 #if( DEBUG_VMM_DESTROY & 1 ) … … 765 1008 vseg->vmm = NULL; 766 1009 767 // remove vseg from xlist1010 // remove vseg from zombi_list 768 1011 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 769 1012 … … 779 1022 } 780 1023 1024 // take the GPT lock 1025 remote_rwlock_wr_acquire( gpt_lock_xp ); 1026 781 1027 // release memory allocated to the GPT itself 782 1028 hal_gpt_destroy( &vmm->gpt ); 1029 1030 // release the GPT lock 1031 remote_rwlock_wr_release( gpt_lock_xp ); 783 1032 784 1033 #if DEBUG_VMM_DESTROY … … 816 1065 } // end vmm_check_conflict() 817 1066 818 //////////////////////////////////////////////////////////////////////////////////////////// 819 // This static function is called by the vmm_create_vseg() function, and implements 820 // the VMM stack_vseg specific allocator. 821 //////////////////////////////////////////////////////////////////////////////////////////// 822 // @ vmm : pointer on VMM. 823 // @ vpn_base : (return value) first allocated page 824 // @ vpn_size : (return value) number of allocated pages 825 //////////////////////////////////////////////////////////////////////////////////////////// 826 static error_t vmm_stack_alloc( vmm_t * vmm, 827 vpn_t * vpn_base, 828 vpn_t * vpn_size ) 829 { 830 // get stack allocator pointer 831 stack_mgr_t * mgr = &vmm->stack_mgr; 832 833 // get lock on stack allocator 834 busylock_acquire( &mgr->lock ); 835 836 // get first free slot index in bitmap 837 int32_t index = bitmap_ffc( &mgr->bitmap , 4 ); 838 if( (index < 0) || (index > 31) ) 839 { 840 busylock_release( &mgr->lock ); 841 return 0xFFFFFFFF; 842 } 843 844 // update bitmap 845 bitmap_set( &mgr->bitmap , index ); 846 847 // release lock on stack allocator 848 busylock_release( &mgr->lock ); 849 850 // returns vpn_base, vpn_size (one page non allocated) 851 *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1; 852 *vpn_size = CONFIG_VMM_STACK_SIZE - 1; 853 return 0; 854 855 } // end vmm_stack_alloc() 856 857 //////////////////////////////////////////////////////////////////////////////////////////// 858 // This static function is called by the vmm_create_vseg() function, and implements 859 // the VMM MMAP specific allocator. 860 //////////////////////////////////////////////////////////////////////////////////////////// 861 // @ vmm : [in] pointer on VMM. 862 // @ npages : [in] requested number of pages. 863 // @ vpn_base : [out] first allocated page. 864 // @ vpn_size : [out] actual number of allocated pages. 
865 //////////////////////////////////////////////////////////////////////////////////////////// 866 static error_t vmm_mmap_alloc( vmm_t * vmm, 867 vpn_t npages, 868 vpn_t * vpn_base, 869 vpn_t * vpn_size ) 870 { 871 uint32_t index; 872 vseg_t * vseg; 873 vpn_t base; 874 vpn_t size; 875 vpn_t free; 876 877 #if DEBUG_VMM_MMAP_ALLOC 878 thread_t * this = CURRENT_THREAD; 879 uint32_t cycle = (uint32_t)hal_get_cycles(); 880 if( DEBUG_VMM_MMAP_ALLOC < cycle ) 881 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 882 __FUNCTION__, this->process->pid, this->trdid, cycle ); 883 #endif 884 885 // vseg size must be power of 2 886 // compute actual size and index in zombi_list array 887 size = POW2_ROUNDUP( npages ); 888 index = bits_log2( size ); 889 890 // get mmap allocator pointer 891 mmap_mgr_t * mgr = &vmm->mmap_mgr; 892 893 // get lock on mmap allocator 894 busylock_acquire( &mgr->lock ); 895 896 // get vseg from zombi_list or from mmap zone 897 if( list_is_empty( &mgr->zombi_list[index] ) ) // from mmap zone 898 { 899 // check overflow 900 free = mgr->first_free_vpn; 901 if( (free + size) > mgr->vpn_size ) return -1; 902 903 // update MMAP allocator 904 mgr->first_free_vpn += size; 905 906 // compute base 907 base = free; 908 } 909 else // from zombi_list 910 { 911 // get pointer on zombi vseg from zombi_list 912 vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist ); 913 914 // remove vseg from free-list 915 list_unlink( &vseg->zlist ); 916 917 // compute base 918 base = vseg->vpn_base; 919 } 920 921 // release lock on mmap allocator 922 busylock_release( &mgr->lock ); 923 924 #if DEBUG_VMM_MMAP_ALLOC 925 cycle = (uint32_t)hal_get_cycles(); 926 if( DEBUG_VMM_DESTROY < cycle ) 927 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n", 928 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle ); 929 #endif 930 931 // returns vpn_base, vpn_size 932 *vpn_base = base; 933 *vpn_size = size; 934 return 0; 935 936 } // end vmm_mmap_alloc() 1067 937 1068 938 1069 //////////////////////////////////////////////// … … 968 1099 { 969 1100 // get vpn_base and vpn_size from STACK allocator 970 error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size ); 971 if( error ) 972 { 973 printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n", 974 __FUNCTION__ , process->pid , local_cxy ); 975 return NULL; 976 } 1101 vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size ); 977 1102 978 1103 // compute vseg base and size from vpn_base and vpn_size … … 1072 1197 cxy ); 1073 1198 1199 // build extended pointer on VSL lock 1200 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1201 1202 // take the VSL lock in write mode 1203 remote_rwlock_wr_acquire( lock_xp ); 1204 1074 1205 // attach vseg to VSL 1075 1206 vmm_attach_vseg_to_vsl( vmm , vseg ); 1207 1208 // release the VSL lock 1209 remote_rwlock_wr_release( lock_xp ); 1076 1210 1077 1211 #if DEBUG_VMM_CREATE_VSEG … … 1086 1220 } // vmm_create_vseg() 1087 1221 1088 /////////////////////////////////// 1089 void vmm_delete_vseg( pid_t pid, 1090 intptr_t vaddr ) 1222 1223 ////////////////////////////////////////// 1224 void vmm_remove_vseg( process_t * process, 1225 vseg_t * vseg ) 1091 1226 { 1092 process_t * process; // local pointer on local process 1093 vmm_t * vmm; // local pointer on local process VMM 1094 vseg_t * vseg; // local pointer on local vseg containing vaddr 1095 gpt_t * gpt; // local pointer on local process GPT 1227 vmm_t * vmm; // local pointer on process VMM 1228 bool_t is_ref; // local 
process is reference process 1229 uint32_t vseg_type; // vseg type 1096 1230 vpn_t vpn; // VPN of current PTE 1097 1231 vpn_t vpn_min; // VPN of first PTE … … 1103 1237 cxy_t page_cxy; // page descriptor cluster 1104 1238 page_t * page_ptr; // page descriptor pointer 1105 xptr_t forks_xp; // extended pointer on pending forks counter 1106 xptr_t lock_xp; // extended pointer on lock protecting forks counter 1107 uint32_t forks; // actual number of pendinf forks 1108 uint32_t vseg_type; // vseg type 1109 1110 #if DEBUG_VMM_DELETE_VSEG 1111 uint32_t cycle = (uint32_t)hal_get_cycles(); 1112 thread_t * this = CURRENT_THREAD; 1113 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1114 printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n", 1115 __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle ); 1116 #endif 1117 1118 // get local pointer on local process descriptor 1119 process = cluster_get_local_process_from_pid( pid ); 1120 1121 if( process == NULL ) 1122 { 1123 printk("\n[ERRORR] in %s : cannot get local process descriptor\n", 1124 __FUNCTION__ ); 1125 return; 1126 } 1127 1128 // get pointers on local process VMM an GPT 1239 xptr_t count_xp; // extended pointer on page refcount 1240 uint32_t count; // current value of page refcount 1241 1242 // check arguments 1243 assert( (process != NULL), "process argument is NULL" ); 1244 assert( (vseg != NULL), "vseg argument is NULL" ); 1245 1246 // compute is_ref 1247 is_ref = (GET_CXY( process->ref_xp ) == local_cxy); 1248 1249 // get pointers on local process VMM 1129 1250 vmm = &process->vmm; 1130 gpt = &process->vmm.gpt;1131 1132 // get local pointer on vseg containing vaddr1133 vseg = vmm_vseg_from_vaddr( vmm , vaddr );1134 1135 if( vseg == NULL )1136 {1137 printk("\n[ERRORR] in %s : cannot get vseg descriptor\n",1138 __FUNCTION__ );1139 return;1140 }1141 1251 1142 1252 // get relevant vseg infos … … 1145 1255 vpn_max = vpn_min + vseg->vpn_size; 1146 1256 1147 // loop to invalidate all vseg PTEs in GPT 1257 #if DEBUG_VMM_REMOVE_VSEG 1258 uint32_t cycle = (uint32_t)hal_get_cycles(); 1259 thread_t * this = CURRENT_THREAD; 1260 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1261 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", 1262 __FUNCTION__, this->process->pid, this->trdid, 1263 process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); 1264 #endif 1265 1266 // loop on PTEs in GPT 1148 1267 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1149 1268 { 1150 // get ppn and attr from GPT entry1151 hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );1152 1153 if( attr & GPT_MAPPED ) // entryis mapped1269 // get ppn and attr 1270 hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn ); 1271 1272 if( attr & GPT_MAPPED ) // PTE is mapped 1154 1273 { 1155 1274 1156 #if( DEBUG_VMM_ DELETE_VSEG & 1 )1157 if( DEBUG_VMM_ DELETE_VSEG < cycle )1158 printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );1275 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1276 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1277 printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) ); 1159 1278 #endif 1160 1279 // unmap GPT entry in local GPT 1161 hal_gpt_reset_pte( gpt , vpn ); 1162 1163 // the allocated page is not released to for kernel vseg 1164 if( (vseg_type != VSEG_TYPE_KCODE) && 1165 (vseg_type != VSEG_TYPE_KDATA) && 1166 (vseg_type != VSEG_TYPE_KDEV ) ) 1280 hal_gpt_reset_pte( &vmm->gpt , vpn ); 1281 1282 // get pointers on physical page descriptor 1283 page_xp = ppm_ppn2page( ppn 
); 1284 page_cxy = GET_CXY( page_xp ); 1285 page_ptr = GET_PTR( page_xp ); 1286 1287 // decrement page refcount 1288 count_xp = XPTR( page_cxy , &page_ptr->refcount ); 1289 count = hal_remote_atomic_add( count_xp , -1 ); 1290 1291 // compute the ppn_release condition depending on vseg type 1292 bool_t ppn_release; 1293 if( (vseg_type == VSEG_TYPE_FILE) || 1294 (vseg_type == VSEG_TYPE_KCODE) || 1295 (vseg_type == VSEG_TYPE_KDATA) || 1296 (vseg_type == VSEG_TYPE_KDEV) ) 1167 1297 { 1168 // get extended pointer on physical page descriptor 1169 page_xp = ppm_ppn2page( ppn ); 1170 page_cxy = GET_CXY( page_xp ); 1171 page_ptr = GET_PTR( page_xp ); 1172 1173 // FIXME This code must be re-written, as the actual release depends on vseg type, 1174 // the reference cluster, the page refcount and/or the forks counter... 1175 1176 // get extended pointers on forks and lock fields 1177 forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1178 lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1179 1180 // get the lock protecting the page 1298 // no physical page release for FILE and KERNEL 1299 ppn_release = false; 1300 } 1301 else if( (vseg_type == VSEG_TYPE_CODE) || 1302 (vseg_type == VSEG_TYPE_STACK) ) 1303 { 1304 // always release physical page for private vsegs 1305 ppn_release = true; 1306 } 1307 else if( (vseg_type == VSEG_TYPE_ANON) || 1308 (vseg_type == VSEG_TYPE_REMOTE) ) 1309 { 1310 // release physical page if reference cluster 1311 ppn_release = is_ref; 1312 } 1313 else if( is_ref ) // vseg_type == DATA in reference cluster 1314 { 1315 // get extended pointers on forks and lock field in page descriptor 1316 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1317 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1318 1319 // take lock protecting "forks" counter 1181 1320 remote_busylock_acquire( lock_xp ); 1182 1321 1183 // get pending forks counter 1184 forks = hal_remote_l32( forks_xp ); 1185 1186 if( forks ) // decrement pending forks counter 1322 // get number of pending forks from page descriptor 1323 uint32_t forks = hal_remote_l32( forks_xp ); 1324 1325 // decrement pending forks counter if required 1326 if( forks ) hal_remote_atomic_add( forks_xp , -1 ); 1327 1328 // release lock protecting "forks" counter 1329 remote_busylock_release( lock_xp ); 1330 1331 // release physical page if forks == 0 1332 ppn_release = (forks == 0); 1333 } 1334 else // vseg_type == DATA not in reference cluster 1335 { 1336 // no physical page release if not in reference cluster 1337 ppn_release = false; 1338 } 1339 1340 // release physical page to relevant kmem when required 1341 if( ppn_release ) 1342 { 1343 if( page_cxy == local_cxy ) 1187 1344 { 1188 // update forks counter 1189 hal_remote_atomic_add( forks_xp , -1 ); 1190 1191 // release the lock protecting the page 1192 remote_busylock_release( lock_xp ); 1193 } 1194 else // release physical page to relevant cluster 1345 req.type = KMEM_PAGE; 1346 req.ptr = page_ptr; 1347 kmem_free( &req ); 1348 } 1349 else 1195 1350 { 1196 // release the lock protecting the page 1197 remote_busylock_release( lock_xp ); 1198 1199 // release the page to kmem 1200 if( page_cxy == local_cxy ) // local cluster 1201 { 1202 req.type = KMEM_PAGE; 1203 req.ptr = page_ptr; 1204 kmem_free( &req ); 1205 } 1206 else // remote cluster 1207 { 1208 rpc_pmem_release_pages_client( page_cxy , page_ptr ); 1209 } 1210 1211 #if( DEBUG_VMM_DELETE_VSEG & 1 ) 1212 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1213 printk("- release ppn %x\n", ppn ); 1214 #endif 1351 rpc_pmem_release_pages_client( page_cxy 
, page_ptr ); 1215 1352 } 1216 1217 1353 } 1354 1355 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1356 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1357 { 1358 if( ppn_release ) printk(" / released to kmem\n" ); 1359 else printk("\n"); 1360 } 1361 #endif 1218 1362 } 1219 1363 } 1220 1364 1221 // remove vseg from VSL and release vseg descriptor (if not MMAP)1365 // remove vseg from VSL 1222 1366 vmm_detach_vseg_from_vsl( vmm , vseg ); 1223 1367 1224 #if DEBUG_VMM_DELETE_VSEG 1368 // release vseg descriptor depending on vseg type 1369 if( vseg_type == VSEG_TYPE_STACK ) 1370 { 1371 // release slot to local stack allocator 1372 vmm_stack_free( vmm , vseg ); 1373 1374 // release vseg descriptor to local kmem 1375 vseg_free( vseg ); 1376 } 1377 else if( (vseg_type == VSEG_TYPE_ANON) || 1378 (vseg_type == VSEG_TYPE_FILE) || 1379 (vseg_type == VSEG_TYPE_REMOTE) ) 1380 { 1381 // release vseg to local mmap allocator 1382 vmm_mmap_free( vmm , vseg ); 1383 } 1384 else 1385 { 1386 // release vseg descriptor to local kmem 1387 vseg_free( vseg ); 1388 } 1389 1390 #if DEBUG_VMM_REMOVE_VSEG 1225 1391 cycle = (uint32_t)hal_get_cycles(); 1226 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1227 printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n", 1228 __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle ); 1229 #endif 1230 1231 } // end vmm_delete_vseg() 1392 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1393 printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n", 1394 __FUNCTION__, this->process->pid, this->trdid, 1395 process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); 1396 #endif 1397 1398 } // end vmm_remove_vseg() 1399 1400 1401 /////////////////////////////////// 1402 void vmm_delete_vseg( pid_t pid, 1403 intptr_t vaddr ) 1404 { 1405 process_t * process; // local pointer on local process 1406 vseg_t * vseg; // local pointer on local vseg containing vaddr 1407 1408 // get local pointer on local process descriptor 1409 process = cluster_get_local_process_from_pid( pid ); 1410 1411 if( process == NULL ) 1412 { 1413 printk("\n[WARNING] in %s : cannot get local process descriptor\n", 1414 __FUNCTION__ ); 1415 return; 1416 } 1417 1418 // get local pointer on local vseg containing vaddr 1419 vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr ); 1420 1421 if( vseg == NULL ) 1422 { 1423 printk("\n[WARNING] in %s : cannot get vseg descriptor\n", 1424 __FUNCTION__ ); 1425 return; 1426 } 1427 1428 // call relevant function 1429 vmm_remove_vseg( process , vseg ); 1430 1431 } // end vmm_delete_vseg 1432 1232 1433 1233 1434 ///////////////////////////////////////////// … … 1235 1436 intptr_t vaddr ) 1236 1437 { 1237 xptr_t iter_xp;1238 1438 xptr_t vseg_xp; 1239 1439 vseg_t * vseg; 1440 xptr_t iter_xp; 1240 1441 1241 1442 // get extended pointers on VSL lock and root 1242 xptr_t lock_xp = XPTR( local_cxy , &vmm->vs egs_lock );1443 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1243 1444 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 1244 1445 … … 1249 1450 XLIST_FOREACH( root_xp , iter_xp ) 1250 1451 { 1452 // get pointers on vseg 1251 1453 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1252 1454 vseg = GET_PTR( vseg_xp ); 1253 1455 1456 // return success when match 1254 1457 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) 1255 1458 { … … 1262 1465 // return failure 1263 1466 remote_rwlock_rd_release( lock_xp ); 1264 1265 1467 return NULL; 1266 1468 … … 1462 1664 vseg_init_from_ref( vseg , vseg_xp ); 1463 1665 1666 // build extended pointer on 
VSL lock
1667 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1668
1669 // take the VSL lock in write mode
1670 remote_rwlock_wr_acquire( lock_xp );
1671
1464 1672 // register local vseg in local VSL
1465 1673 vmm_attach_vseg_to_vsl( vmm , vseg );
1674
1675 // release the VSL lock
1676 remote_rwlock_wr_release( lock_xp );
1466 1677 }
1467 1678
… …
1486 1697 uint32_t cycle = (uint32_t)hal_get_cycles();
1487 1698 thread_t * this = CURRENT_THREAD;
1488 xptr_t this_xp = XPTR( local_cxy , this );
1489 1699 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
1490 1700 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
… …
1717 1927 error_t error; // value returned by called functions
1718 1928
1929 #if DEBUG_VMM_HANDLE_PAGE_FAULT
1930 uint32_t cycle = (uint32_t)hal_get_cycles();
1931 thread_t * this = CURRENT_THREAD;
1932 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
1933 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
1934 __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
1935 hal_vmm_display( process , true );
1936 #endif
1937
1719 1938 // get local vseg (access to reference VSL can be required)
1720 1939 error = vmm_get_vseg( process,
… …
1723 1942 if( error )
1724 1943 {
1725 printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
1726 __FUNCTION__ , vpn , process->pid );
1944 printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
1945 __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
1727 1946
1728 1947 return EXCP_USER_ERROR;
1729 1948 }
1730 1949
1731 #if DEBUG_VMM_HANDLE_PAGE_FAULT
1732 uint32_t cycle = (uint32_t)hal_get_cycles();
1733 thread_t * this = CURRENT_THREAD;
1950 #if DEBUG_VMM_HANDLE_PAGE_FAULT
1951 cycle = (uint32_t)hal_get_cycles();
1734 1952 if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
1735 printk("\n[%s] threadr[%x,%x] enter for vpn %x /%s / cycle %d\n",
1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
1953 printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
1954 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
1737 1955 #endif
1738 1956
… …
1971 2189 error_t error;
1972 2190
2191 thread_t * this = CURRENT_THREAD;
2192
1973 2193 #if DEBUG_VMM_HANDLE_COW
1974 2194 uint32_t cycle = (uint32_t)hal_get_cycles();
1975 thread_t * this = CURRENT_THREAD;
1976 xptr_t this_xp = XPTR( local_cxy , this );
1977 2195 if( DEBUG_VMM_HANDLE_COW < cycle )
1978 2196 printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
1979 2197 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
2198 hal_vmm_display( process , true );
1980 2199 #endif
1981 2200
… …
1991 2210 if( error )
1992 2211 {
1993 printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
1994 __FUNCTION__, vpn, process->pid );
2212 printk("\n[PANIC] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
2213 __FUNCTION__, vpn, process->pid, this->trdid );
1995 2214
1996 2215 return EXCP_KERNEL_PANIC;
-
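Note on the vmm_mmap_alloc() sizing policy introduced above: every request is rounded up to a power of 2, and the log2 of that size selects the zombi_list[] entry used to recycle released vsegs of the same size class. The short user-space sketch below illustrates only this arithmetic; pow2_roundup() and bits_log2() are local stand-ins assumed to behave like the kernel POW2_ROUNDUP macro and bits_log2() helper.

    #include <stdint.h>
    #include <stdio.h>

    // stand-in for the kernel POW2_ROUNDUP macro : round up to a power of 2
    static uint32_t pow2_roundup( uint32_t x )
    {
        uint32_t p = 1;
        while( p < x ) p <<= 1;
        return p;
    }

    // stand-in for the kernel bits_log2() helper : log2 of a power of 2
    static uint32_t bits_log2( uint32_t x )
    {
        uint32_t n = 0;
        while( (x >> n) > 1 ) n++;
        return n;
    }

    int main( void )
    {
        uint32_t requests[] = { 1 , 3 , 5 , 8 , 100 , 1000 };

        for( unsigned i = 0 ; i < sizeof(requests) / sizeof(requests[0]) ; i++ )
        {
            uint32_t npages = requests[i];
            uint32_t size   = pow2_roundup( npages );  // actual vpn_size returned
            uint32_t order  = bits_log2( size );       // index in the zombi_list[32] array
            printf( "npages %4u -> vpn_size %4u / order %2u\n" , npages , size , order );
        }
        return 0;
    }

For instance a 5-page request consumes 8 pages (order 3), so a released 8-page vseg can later serve any request of 5 to 8 pages.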
trunk/kernel/mm/vmm.h
r624 r625 48 48 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not
49 49 * mapped to detect stack overflow.
50 * The slot index can be computed form the slot base address, and reversely.
51 * All allocation / release operations are registered in the stack_bitmap, that completely
52 * define the STACK zone status.
50 * In this implementation, the slot index is defined by the user thread LTID.
51 * All allocated stacks are registered in a bitmap defining the STACK zone state:
52 * - The allocator checks that the requested slot has not been already allocated, and sets the
53 * corresponding bit in the bitmap.
54 * - The de-allocator function resets the corresponding bit in the bitmap.
53 55 ********************************************************************************************/
54 56
… …
57 59 busylock_t lock; /*! lock protecting STACK allocator */
58 60 vpn_t vpn_base; /*! first page of STACK zone */
59 bitmap_t bitmap; /*! bit bector of allocated stacks */
61 bitmap_t bitmap; /*! bit vector of allocated stacks */
60 62 }
61 63 stack_mgr_t;
… …
84 86 vpn_t vpn_size; /*! number of pages in MMAP zone */
85 87 vpn_t first_free_vpn; /*! first free page in MMAP zone */
86 list_entry_t zombi_list[32]; /*! array of roots of released vsegs lists */
88 xlist_entry_t zombi_list[32]; /*! array of roots of released vsegs lists */
87 89 }
88 90 mmap_mgr_t;
… …
109 111 typedef struct vmm_s
110 112 {
111 remote_rwlock_t vsegs_lock; /*! lock protecting the local VSL */
113 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */
112 114 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */
113 115 uint32_t vsegs_nr; /*! total number of local vsegs */
… …
132 134
133 135 /*********************************************************************************************
134 * This function initialises the virtual memory manager attached to an user process.
136 * This function makes a partial initialisation of the VMM attached to an user process.
137 * The GPT must have been previously created, with the hal_gpt_create() function.
138 * - It registers "args", "envs" vsegs in the VSL.
135 139 * - It initializes the STACK and MMAP allocators.
136 * - It registers the "kentry", "args", "envs" vsegs in the VSL.
137 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
138 * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping.
139 ******************************************************a**************************************
140 * Implementation notes:
140 * Note:
141 141 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
142 * - The "stack" vsegs are dynamically created by the thread_user_create() function.
143 * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.
142 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
143 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
144 144 *********************************************************************************************
145 145 * @ process : pointer on process descriptor
146 146 * @ return 0 if success / return -1 if failure.
147 147 ********************************************************************************************/ 148 error_t vmm_init( struct process_s * process ); 149 150 /********************************************************************************************* 151 * This function displays on TXY0 the list or registered vsegs for a given <process>. 152 * It must be executed by a thread running in reference cluster. 153 * If the <mapping> argument is true, it displays for each vseg all mapped PTEs in GPT. 148 error_t vmm_user_init( struct process_s * process ); 149 150 /********************************************************************************************* 151 * This function re-initialises the VMM attached to an user process to prepare a new 152 * call to the vmm_user_init() function after an exec() syscall. 153 * It removes from the VMM of the process identified by the <process> argument all 154 * non kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function. 155 * - the vsegs are removed from the VSL. 156 * - the corresponding GPT entries are removed from the GPT. 157 * - the physical pages are released to the relevant kmem when they are not shared. 158 * The VSL and the GPT are not modified for the kernel vsegs. 154 159 ********************************************************************************************* 155 160 * @ process : pointer on process descriptor. 156 * @ mapping : detailed mapping if true. 157 ********************************************************************************************/ 158 void hal_vmm_display( struct process_s * process, 159 bool_t mapping ); 161 ********************************************************************************************/ 162 void vmm_user_reset( struct process_s * process ); 160 163 161 164 /********************************************************************************************* 162 165 * This function is called by the process_make_fork() function. It partially copies 163 166 * the content of a remote parent process VMM to the local child process VMM: 164 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child165 * VSL, and all valid GPT entries in parent GPT are copied to the child GPT.166 * The WRITABLE flag is reset and the COW flag is set in child GPT.167 * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the168 * GPT entries are not copied in the chil f GPT, thatwill be dynamically updated from167 * - All DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the 168 * child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the 169 * WRITABLE flag is reset and the COW flag is set. 170 * - All CODE vsegs registered in the parent VSL are registered in the child VSL, but the 171 * GPT entries are not copied in the child GPT, and will be dynamically updated from 169 172 * the .elf file when a page fault is reported. 170 * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all173 * - All FILE vsegs registered in the parent VSL are registered in the child VSL, and all 171 174 * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set. 172 * - no STACK vseg is copied from parent VMM to child VMM, because the child STACKvseg175 * - No STACK vseg is copied from parent VMM to child VMM, because the child stack vseg 173 176 * must be copied later from the cluster containing the user thread requesting the fork(). 
177 * - The KERNEL vsegs required by the target architecture are re-created in the child 178 * VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function. 174 179 ********************************************************************************************* 175 180 * @ child_process : local pointer on local child process descriptor. … … 196 201 197 202 /********************************************************************************************* 198 * This global function modifies a GPT entry identified by the <process> and <vpn>199 * argumentsin all clusters containing a process copy.203 * This function modifies a GPT entry identified by the <process> and <vpn> arguments 204 * in all clusters containing a process copy. 200 205 * It must be called by a thread running in the reference cluster. 201 206 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies, … … 240 245 /********************************************************************************************* 241 246 * This function allocates memory for a vseg descriptor, initialises it, and register it 242 * in the VMM of the local process descriptor, that must be the reference process. 243 * For the 'stack", "file", "anon", & "remote" types, it does not use the <base> argument, 244 * but uses the STACK and MMAP virtual memory allocators. 247 * in the VSL of the local process descriptor, that must be the reference process. 248 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments, 249 * but uses the specific MMAP virtual memory allocator. 250 * - For the STACK type, it does not use the <size> argument, and the <base> argument 251 * defines the user thread LTID used by the specific STACK virtual memory allocator. 245 252 * It checks collision with all pre-existing vsegs. 246 * To comply with the "on-demand" paging policy, this function does NOT modify the page table,253 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT, 247 254 * and does not allocate physical memory for vseg data. 248 255 * It should be called by a local thread (could be a RPC thread if the client thread is not 249 * running in the re gerence cluster).256 * running in the reference cluster). 250 257 ********************************************************************************************* 251 258 * @ process : pointer on local processor descriptor. 252 259 * @ type : vseg type. 253 * @ base : vseg base address ( not used for dynamically allocated vsegs).260 * @ base : vseg base address (or user thread ltid for an user stack vseg). 254 261 * @ size : vseg size (bytes). 255 262 * @ file_offset : offset in file for CODE, DATA, FILE types. … … 269 276 270 277 /********************************************************************************************* 271 * This function removes from the local VMM of a process descriptor identified by the <pid> 272 * argument a local vseg identified by its base address <vaddr> in user space. 273 * It can be used for any type of vseg, but must be called by a local thread. 274 * Use the RPC_VMM_DELETE_VSEG if the client thread is not local. 275 * It does nothing if the process is not registered in the local cluster. 276 * It does nothing if the vseg is not registered in the local process VSL. 277 * - It removes from the local GPT all registered PTEs. 
If it is executed in the reference
278 * cluster, it releases the referenced physical pages, to the relevant kmem allocator,
279 * depending on vseg type and the pending forks counter.
280 * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
281 *********************************************************************************************
282 * @ process : process identifier.
283 * @ vaddr : vseg base address in user space.
278 * This function removes from the VMM of a process descriptor identified by the <process>
279 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
280 * As it uses local pointers, it must be called by a local thread.
281 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
282 * It causes a kernel panic if the process is not registered in the local cluster,
283 * or if the vseg is not registered in the process VSL.
284 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
285 * unmapped from local GPT. Other actions depend on the vseg type:
286 * - Regarding the vseg descriptor release:
287 * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
288 * . for STACK the vseg is released to the local stack allocator.
289 * . for all other types, the vseg is released to the local kmem.
290 * - Regarding the physical pages release:
291 * . for KERNEL and FILE, the pages are not released to kmem.
292 * . for CODE and STACK, the pages are released to local kmem when they are not COW.
293 * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
294 * the local cluster is the reference cluster.
295 * The lock protecting the VSL must be taken by the caller.
296 *********************************************************************************************
297 * @ process : local pointer on process.
298 * @ vseg : local pointer on vseg.
299 ********************************************************************************************/
300 void vmm_remove_vseg( struct process_s * process,
301 struct vseg_s * vseg );
302
303 /*********************************************************************************************
304 * This function calls the vmm_remove_vseg() function to remove, from the VMM of a local
305 * process descriptor identified by the <pid> argument, the vseg identified by the <vaddr>
306 * virtual address in user space.
307 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
308 *********************************************************************************************
309 * @ pid : process identifier.
310 * @ vaddr : virtual address in user space.
284 311 ********************************************************************************************/ 285 312 void vmm_delete_vseg( pid_t pid, 286 313 intptr_t vaddr ); 287 288 /*********************************************************************************************289 * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument.290 * and updates the vmm field in the vseg descriptor.291 * It takes the lock protecting VSL.292 *********************************************************************************************293 * @ vmm : local pointer on local VMM.294 * @ vseg : local pointer on local vseg descriptor.295 ********************************************************************************************/296 void vmm_attach_vseg_to_vsl( vmm_t * vmm,297 vseg_t * vseg );298 299 /*********************************************************************************************300 * This function removes a vseg identified by the <vseg> argument from the local VSL301 * identified by the <vmm> argument and release the memory allocated to vseg descriptor,302 * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).303 * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,304 * and vseg descriptor is released.305 * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,306 * and the stack slot is returned to the local VMM_STACK allocator.307 * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list308 * of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.309 *********************************************************************************************310 * @ vmm : local pointer on local VMM.311 * @ vseg : local pointer on local vseg to be removed.312 ********************************************************************************************/313 void vmm_detach_vseg_from_vsl( vmm_t * vmm,314 vseg_t * vseg );315 314 316 315 /********************************************************************************************* -
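Note on the STACK allocator documented above: vmm_stack_alloc() and vmm_stack_free() must be exact inverses, with the first page of each slot left unmapped to catch stack overflow. A minimal user-space sketch checking the round-trip; the two constants are illustrative values, not the ones defined in kernel_config.h.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    // illustrative values : not the ones defined in kernel_config.h
    #define STACK_ZONE_BASE  0x100   // first vpn of the STACK zone (mgr->vpn_base)
    #define STACK_SLOT_SIZE  0x10    // vpns per stack slot (CONFIG_VMM_STACK_SIZE)

    int main( void )
    {
        for( uint32_t ltid = 0 ; ltid < 8 ; ltid++ )
        {
            // allocation side : same arithmetic as vmm_stack_alloc()
            // the first page of the slot stays unmapped to catch stack overflow
            uint32_t vpn_base = STACK_ZONE_BASE + ltid * STACK_SLOT_SIZE + 1;
            uint32_t vpn_size = STACK_SLOT_SIZE - 1;

            // release side : same arithmetic as vmm_stack_free()
            uint32_t index = (vpn_base - 1 - STACK_ZONE_BASE) / STACK_SLOT_SIZE;

            assert( index == ltid );   // the two formulas must be exact inverses

            printf( "ltid %u -> vpn_base %#x / vpn_size %#x -> index %u\n" ,
                    ltid , vpn_base , vpn_size , index );
        }
        return 0;
    }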
trunk/kernel/mm/vseg.c
r623 r625 61 61 } 62 62 63 ///////////////////// 63 /////////////////////////// 64 64 vseg_t * vseg_alloc( void ) 65 65 { -
trunk/kernel/mm/vseg.h
r623 r625 70 70 /******************************************************************************************* 71 71 * This structure defines a virtual segment descriptor. 72 * -The VSL contains only local vsegs, but is implemented as an xlist, because it can be73 * accessed bythread running in a remote cluster.74 * - The zombi list is used by the local MMAP allocator. It is implemented as a local list.72 * The VSL contains only local vsegs, but is implemented as an xlist, because it can be 73 * accessed by a thread running in a remote cluster. 74 * The xlist field is also used to implement the zombi lists used by the MMAP allocator. 75 75 ******************************************************************************************/ 76 76 … … 78 78 { 79 79 xlist_entry_t xlist; /*! all vsegs in same VSL */ 80 list_entry_t zlist; /*! all vsegs in same zombi list */81 80 struct vmm_s * vmm; /*! pointer on associated VM manager */ 82 81 uint32_t type; /*! vseg type */ -
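Note on the removal of the zlist field above: a vseg is attached either to a VSL or to a zombi list, never to both at the same time, so a single embedded xlist entry suffices. The sketch below shows the idea with a plain local intrusive list; the real xlist_entry_t works on extended pointers, which is not reproduced here.

    #include <stddef.h>
    #include <stdio.h>

    // minimal intrusive list entry (local-pointer stand-in for xlist_entry_t)
    typedef struct entry_s { struct entry_s * next; struct entry_s * prev; } entry_t;

    typedef struct vseg_s
    {
        entry_t  list;       // used in the VSL *or* in a zombi list, never both
        unsigned vpn_base;
    } vseg_t;

    static void root_init( entry_t * r ) { r->next = r; r->prev = r; }

    static void add_last( entry_t * r , entry_t * e )
    { e->prev = r->prev; e->next = r; r->prev->next = e; r->prev = e; }

    static void unlink_entry( entry_t * e )
    { e->prev->next = e->next; e->next->prev = e->prev; }

    int main( void )
    {
        entry_t vsl, zombi;
        root_init( &vsl );
        root_init( &zombi );

        vseg_t v = { .vpn_base = 0x300 };

        add_last( &vsl , &v.list );      // vseg registered in the VSL
        unlink_entry( &v.list );         // vseg detached from the VSL...
        add_last( &zombi , &v.list );    // ...then recycled through a zombi list

        // recover the vseg from its embedded entry, as XLIST_ELEMENT does
        vseg_t * p = (vseg_t *)((char *)zombi.next - offsetof( vseg_t , list ));
        printf( "zombi vseg vpn_base = %#x\n" , p->vpn_base );
        return 0;
    }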
trunk/kernel/syscalls/sys_barrier.c
r624 r625 2 2 * sys_barrier.c - Access a POSIX barrier. 3 3 * 4 * authors Alain Greiner (2016,2017,2018 )4 * authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 25 25 #include <hal_special.h> 26 26 #include <hal_uspace.h> 27 #include <hal_vmm.h> 27 28 #include <errno.h> 28 29 #include <thread.h> … … 56 57 process_t * process = this->process; 57 58 59 #if (DEBUG_SYS_BARRIER || CONFIG_INSTRUMENTATION_SYSCALLS) 60 uint64_t tm_start = hal_get_cycles(); 61 #endif 62 58 63 #if DEBUG_SYS_BARRIER 59 uint64_t tm_start;60 uint64_t tm_end;61 tm_start = hal_get_cycles();62 64 if( DEBUG_SYS_BARRIER < tm_start ) 63 65 printk("\n[%s] thread[%x,%x] enters for %s / count %d / cycle %d\n", … … 184 186 } // end switch 185 187 188 hal_fence(); 189 190 #if (DEBUG_SYS_BARRIER || CONFIG_INSTRUMENTATION_SYSCALLS) 191 uint64_t tm_end = hal_get_cycles(); 192 #endif 193 186 194 #if DEBUG_SYS_BARRIER 187 tm_end = hal_get_cycles();188 195 if( DEBUG_SYS_BARRIER < tm_end ) 189 printk("\n[%s] thread[%x,%x] exit for %s / cost %d / cycle %d\n", 190 __FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation), 191 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 196 printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n", 197 __FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation), (uint32_t)tm_end ); 198 #endif 199 200 #if CONFIG_INSTRUMENTATION_SYSCALLS 201 hal_atomic_add( &syscalls_cumul_cost[SYS_BARRIER] , tm_end - tm_start ); 202 hal_atomic_add( &syscalls_occurences[SYS_BARRIER] , 1 ); 192 203 #endif 193 204 -
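Note on the instrumentation pattern introduced above, and applied to the other syscalls of this changeset: the cycle counter is sampled when either the debug trace or CONFIG_INSTRUMENTATION_SYSCALLS is active, and the cost is accumulated atomically per syscall index. A condensed user-space sketch of the same pattern, using C11 atomics in place of hal_atomic_add() and a fake cycle counter; the syscall index value is illustrative.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SYS_BARRIER 42              // illustrative syscall index, not the kernel value

    static atomic_uint_fast64_t syscalls_cumul_cost[64];   // stand-ins for the kernel arrays
    static atomic_uint_fast64_t syscalls_occurences[64];

    // fake monotonic counter standing in for hal_get_cycles()
    static uint64_t get_cycles( void )
    {
        static uint64_t fake = 0;
        return fake += 100;
    }

    static int sys_barrier_sketch( void )
    {
        uint64_t tm_start = get_cycles();

        // ... the actual barrier operation would run here ...

        uint64_t tm_end = get_cycles();

        // per-syscall accumulation, as hal_atomic_add() does in the kernel
        atomic_fetch_add( &syscalls_cumul_cost[SYS_BARRIER] , tm_end - tm_start );
        atomic_fetch_add( &syscalls_occurences[SYS_BARRIER] , 1 );
        return 0;
    }

    int main( void )
    {
        for( int i = 0 ; i < 3 ; i++ ) sys_barrier_sketch();

        printf( "SYS_BARRIER : %lu calls / %lu cycles\n" ,
                (unsigned long)atomic_load( &syscalls_occurences[SYS_BARRIER] ) ,
                (unsigned long)atomic_load( &syscalls_cumul_cost[SYS_BARRIER] ) );
        return 0;
    }

Dividing the accumulated cost by the occurrence counter then gives the mean cost per call of each syscall.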
trunk/kernel/syscalls/sys_close.c
r594 r625 35 35 int sys_close ( uint32_t file_id ) 36 36 { 37 error_t error; 38 xptr_t file_xp; 37 error_t error; 38 xptr_t file_xp; 39 cxy_t file_cxy; 40 vfs_file_t * file_ptr; 41 vfs_inode_type_t file_type; 39 42 40 43 thread_t * this = CURRENT_THREAD; … … 54 57 if( file_id >= CONFIG_PROCESS_FILE_MAX_NR ) 55 58 { 56 printk("\n[ERROR] in %s : illegal file descriptor index = %d\n", 57 __FUNCTION__ , file_id ); 59 60 #if DEBUG_SYSCALLS_ERROR 61 printk("\n[ERROR] in %s : illegal file descriptor index = %d\n", 62 __FUNCTION__ , file_id ); 63 #endif 58 64 this->errno = EBADFD; 59 65 return -1; … … 73 79 return -1; 74 80 } 81 82 // get file type 83 file_cxy = GET_CXY( file_xp ); 84 file_ptr = GET_PTR( file_xp ); 85 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 86 87 if( file_type == INODE_TYPE_DIR ) 88 { 89 90 #if DEBUG_SYSCALLS_ERROR 91 printk("\n[ERROR] in %s : file descriptor %d is a directory\n", 92 __FUNCTION__ , file_id ); 93 #endif 94 this->errno = EBADFD; 95 return -1; 96 } 75 97 76 98 // call the relevant VFS function -
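Note on the new directory check above: sys_close() now rejects a descriptor that designates a directory before calling the VFS. The sketch below compresses the three guards (index range, registered descriptor, inode type) into one user-space function; the fd array and file structure are simplified stand-ins, and the standard EBADF stands in for the kernel EBADFD code.

    #include <errno.h>
    #include <stdio.h>

    #define PROCESS_FILE_MAX_NR 32      // illustrative limit

    typedef enum { INODE_TYPE_FILE , INODE_TYPE_DIR } inode_type_t;

    typedef struct file_s { inode_type_t type; } file_t;

    // simplified stand-in for the per-process open-file array
    static file_t * fd_array[PROCESS_FILE_MAX_NR];

    // returns 0 on success / -EBADF on error
    // (the kernel sets this->errno = EBADFD and returns -1 instead)
    static int sys_close_sketch( unsigned file_id )
    {
        // reject an out-of-range file descriptor index
        if( file_id >= PROCESS_FILE_MAX_NR ) return -EBADF;

        // reject an unregistered (already closed) descriptor
        file_t * file = fd_array[file_id];
        if( file == NULL ) return -EBADF;

        // reject a directory : it must go through the directory API
        if( file->type == INODE_TYPE_DIR ) return -EBADF;

        // ... the relevant VFS close function would be called here ...
        fd_array[file_id] = NULL;
        return 0;
    }

    int main( void )
    {
        file_t dir = { INODE_TYPE_DIR };
        fd_array[3] = &dir;
        printf( "close(3) on a directory -> %d (expected -EBADF)\n" , sys_close_sketch( 3 ) );
        return 0;
    }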
trunk/kernel/syscalls/sys_display.c
r624 r625 96 96 // check string in user space
97 97 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
98
99 98 if( error )
100 99 {
… …
110 109 // check string length
111
112 111 length = hal_strlen_from_uspace( string );
113 112 if( length >= 512 )
114 113 {
… …
150 148 // get extended pointer on process PID in cluster CXY
151 149 xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
152
153 150 if( process_xp == XPTR_NULL )
154 151 {
-
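Note on the user-string handling above: the pointer is first checked against the registered vsegs, then the string is measured and rejected when it reaches 512 characters, before any copy. A user-space approximation of that two-step validation, where a static buffer stands in for the registered vsegs and strnlen() for hal_strlen_from_uspace().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_LEN 512   // same limit as the sys_display() check above

    static char user_zone[4096];   // stand-in for the registered user vsegs

    // returns 0 on success / -1 on error, like the kernel checks it mimics
    static int check_and_copy( const char * ustr , char * kbuf )
    {
        // reject a pointer outside the known zone (vmm_get_vseg() analogue)
        uintptr_t p = (uintptr_t)ustr;
        if( (p < (uintptr_t)user_zone) ||
            (p >= (uintptr_t)(user_zone + sizeof(user_zone))) ) return -1;

        // measure the string with an upper bound, then reject oversized strings
        size_t length = strnlen( ustr , MAX_LEN );
        if( length >= MAX_LEN ) return -1;

        memcpy( kbuf , ustr , length + 1 );   // copy the string + ending NUL
        return 0;
    }

    int main( void )
    {
        char kbuf[MAX_LEN];
        strcpy( user_zone , "hello" );
        printf( "valid string -> %d\n" , check_and_copy( user_zone , kbuf ) );
        printf( "bad pointer  -> %d\n" , check_and_copy( (char *)0x4 , kbuf ) );
        return 0;
    }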
trunk/kernel/syscalls/sys_exec.c
r584 r625 2 2 * sys_exec.c - Kernel function implementing the "exec" system call. 3 3 * 4 * Authors Alain Greiner (2016,2017 )4 * Authors Alain Greiner (2016,2017,2017,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 208 208 #if DEBUG_SYS_EXEC 209 209 if( DEBUG_SYS_EXEC < tm_start ) 210 printk("\n[ DBG] %s :thread[%x,%x] enter for path <%s> / cycle = %d\n",210 printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle = %d\n", 211 211 __FUNCTION__, pid, this->trdid, exec_info.path, (uint32_t)tm_start ); 212 212 #endif … … 256 256 } 257 257 258 assert( false , "we should n otexecute this code" );258 assert( false , "we should never execute this code" ); 259 259 260 260 return 0; -
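Note on the final assert in sys_exec() above ("we should never execute this code"): it encodes the usual exec contract, where a successful exec replaces the calling thread and control never comes back, so only the failure path can reach the end of the function. The same contract is visible from user space with the standard POSIX interface:

    #include <stdio.h>
    #include <unistd.h>

    int main( void )
    {
        // on success execlp() does not return : the process image is replaced
        execlp( "echo" , "echo" , "exec replaced this process" , (char *)NULL );

        // this point is reached only when the exec failed
        perror( "execlp" );
        return 1;
    }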
trunk/kernel/syscalls/sys_exit.c
r619 r625 2 2 * sys_exit.c - Kernel function implementing the "exit" system call.
3 3 *
4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites
… …
53 53 pid_t pid = process->pid;
54 54
55 #if (DEBUG_SYS_EXIT || CONFIG_INSTRUMENTATION_SYSCALLS)
56 uint64_t tm_start = hal_get_cycles();
57 #endif
58
55 59 #if DEBUG_SYS_EXIT
56 uint64_t tm_start;
57 uint64_t tm_end;
58 tm_start = hal_get_cycles();
59 60 if( DEBUG_SYS_EXIT < tm_start )
60 61 printk("\n[%s] thread[%x,%x] enter / status %x / cycle %d\n",
61 __FUNCTION__, process->pid, this->trdid , status , (uint32_t)tm_start );
62 __FUNCTION__, pid, this->trdid , status , (uint32_t)tm_start );
62 63 #endif
63 64
… …
66 67 owner_cxy = GET_CXY( owner_xp );
67 68 owner_ptr = GET_PTR( owner_xp );
68
69 #if (DEBUG_SYS_EXIT & 1)
70 if( DEBUG_SYS_EXIT < tm_start )
71 printk("\n[%s] thread[%x,%x] get owner process in cluster %x\n",
72 __FUNCTION__, process->pid, this->trdid, owner_cxy );
73 #endif
74
69 75 70 // get local pointer on the main thread
… …
80 75 parent_cxy = GET_CXY( parent_xp );
81 76 parent_ptr = GET_PTR( parent_xp );
82
83 #if (DEBUG_SYS_EXIT & 1)
84 if( DEBUG_SYS_EXIT < tm_start )
85 printk("\n[%s] thread[%x,%x] get parent process in cluster %x\n",
86 __FUNCTION__, process->pid, this->trdid, parent_cxy );
87 #endif
88
77
78 // get pointers on the parent process main thread
… …
96 85 #if( DEBUG_SYS_EXIT & 1)
97 86 if( DEBUG_SYS_EXIT < tm_start )
98 printk("\n[%s] thread[%x,%x] detached process from TXT\n",
99 __FUNCTION__, process->pid, this->trdid );
87 printk("\n[%s] thread[%x,%x] detached process %x from TXT\n",
88 __FUNCTION__, pid, this->trdid, pid );
100 89 #endif
101 90
102 91 // mark for delete all process threads in all clusters,
103 // but the main thread and this calling thread
104 process_sigaction( process->pid , DELETE_ALL_THREADS );
92 // but the main thread and this calling thread
93 process_sigaction( pid , DELETE_ALL_THREADS );
105 94
106 95 #if( DEBUG_SYS_EXIT & 1)
107 96 if( DEBUG_SYS_EXIT < tm_start )
108 printk("\n[%s] thread[%x,%x] deleted all threads but itself\n",
109 __FUNCTION__, process->pid, this->trdid );
97 printk("\n[%s] thread[%x,%x] deleted all threads in process %x (but itself)\n",
98 __FUNCTION__, pid, this->trdid, pid );
110 99 #endif
111 100
… …
116 105 #if( DEBUG_SYS_EXIT & 1)
117 if( tm_start > DEBUG_SYS_EXIT )
118 printk("\n[%u] thread[%x,%x] marked iself for delete\n",
119 __FUNCTION__, process->pid, this->trdid );
106 if( tm_start > DEBUG_SYS_EXIT )
107 printk("\n[%s] thread[%x,%x] marked itself for delete\n",
108 __FUNCTION__, pid, this->trdid );
120 109 #endif
121 110 thread_delete( XPTR( local_cxy , this ) , pid , true );
122 111 }
123 112
124 // block this main thread
113 // block the main thread
125 114 thread_block( XPTR( owner_cxy , main_ptr ) , THREAD_BLOCKED_GLOBAL );
126 115
127 116 #if( DEBUG_SYS_EXIT & 1)
117 trdid_t main_trdid = hal_remote_l32( XPTR( owner_cxy , &main_ptr->trdid ) );
128 118 if( tm_start > DEBUG_SYS_EXIT )
129 printk("\n[%s] thread[%x,%x] blocked main thread \n",
130 __FUNCTION__, process->pid, this->trdid );
119 printk("\n[%s] thread[%x,%x] blocked main thread[%x,%x]\n",
120 __FUNCTION__, pid, this->trdid, pid, main_trdid );
131 121 #endif
132 122
133 // atomically update owner process descriptor term_state to ask
134 // the parent process sys_wait() function to delete the main thread
123 // update term_state in owner process descriptor to ask
124 // the parent process sys_wait() function to delete the process
135 125 term_state = (status & 0xFF) | PROCESS_TERM_EXIT;
136 126
hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) , term_state ); … … 139 129 if( tm_start > DEBUG_SYS_EXIT ) 140 130 printk("\n[%s] thread[%x,%x] set exit status %x in owner process\n", 141 __FUNCTION__, p rocess->pid, this->trdid, term_state );131 __FUNCTION__, pid, this->trdid, term_state ); 142 132 #endif 143 133 … … 148 138 if( tm_start > DEBUG_SYS_EXIT ) 149 139 printk("\n[%s] thread[%x,%x] unblocked parent main thread in process %x\n", 150 __FUNCTION__ , p rocess->pid, this->trdid,140 __FUNCTION__ , pid, this->trdid, 151 141 hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid) ) ); 152 142 #endif … … 154 144 hal_fence(); 155 145 146 #if (DEBUG_SYS_EXIT || CONFIG_INSTRUMENTATION_SYSCALLS) 147 uint64_t tm_end = hal_get_cycles(); 148 #endif 149 156 150 #if DEBUG_SYS_EXIT 157 tm_end = hal_get_cycles();158 151 if( DEBUG_SYS_EXIT < tm_end ) 159 printk("\n[%s] thread[%x,%x] exit / status %x / cost = %d / cycle %d\n", 160 __FUNCTION__, process->pid, this->trdid, status, 161 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 152 printk("\n[%s] thread[%x,%x] exit / term_state %x / cycle %d\n", 153 __FUNCTION__, pid, this->trdid, term_state, (uint32_t)tm_end ); 154 #endif 155 156 #if CONFIG_INSTRUMENTATION_SYSCALLS 157 hal_atomic_add( &syscalls_cumul_cost[SYS_EXIT] , tm_end - tm_start ); 158 hal_atomic_add( &syscalls_occurences[SYS_EXIT] , 1 ); 162 159 #endif 163 160 -
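The tm_start / tm_end bracketing above is the measurement pattern this changeset rolls out across the syscall layer: the cycle counter is sampled once on entry and once on exit whenever either the debug trace or the global instrumentation is compiled in, and the per-syscall cost is accumulated with atomic adds. A minimal user-space sketch of the same accounting scheme, with C11 atomics standing in for hal_atomic_add() and a fake cycle counter standing in for hal_get_cycles() (everything below except the two array names is a stand-in):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SYS_EXIT  1      /* syscall index : stand-in value */
    #define SYS_NR    64

    /* per-syscall cumulated cost and occurrence counters, named after the kernel arrays */
    static _Atomic uint64_t syscalls_cumul_cost[SYS_NR];
    static _Atomic uint64_t syscalls_occurences[SYS_NR];

    /* stand-in for hal_get_cycles() : monotonic fake cycle counter */
    static uint64_t get_cycles( void )
    {
        static uint64_t c = 0;
        return c += 100;
    }

    static void instrumented_syscall( int id )
    {
        uint64_t tm_start = get_cycles();          /* sampled once on entry */

        /* ... the syscall body would run here ... */

        uint64_t tm_end = get_cycles();            /* sampled once on exit  */

        /* atomic accumulation : safe even if several cores exit syscalls at once */
        atomic_fetch_add( &syscalls_cumul_cost[id] , tm_end - tm_start );
        atomic_fetch_add( &syscalls_occurences[id] , 1 );
    }

    int main( void )
    {
        for( int i = 0 ; i < 4 ; i++ ) instrumented_syscall( SYS_EXIT );

        printf( "SYS_EXIT : %llu calls / %llu cycles cumulated\n",
                (unsigned long long)syscalls_occurences[SYS_EXIT],
                (unsigned long long)syscalls_cumul_cost[SYS_EXIT] );
        return 0;
    }

Keeping the two counters separate lets a post-mortem tool derive the average cost per call, which is how the cumul_cost / occurences pair is meant to be consumed.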
trunk/kernel/syscalls/sys_fork.c
r594 r625
  * sys_fork.c - Kernel function implementing the "fork" system call.
  *
- * Authors   Alain Greiner (2016,2017)
+ * Authors   Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #if DEBUG_SYS_FORK
  if( DEBUG_SYS_FORK < tm_start )
- printk("\n[DBG] %s : thread[%x,%x] enter / cycle = %d\n",
+ printk("\n[%s] thread[%x,%x] enter / cycle = %d\n",
  __FUNCTION__, parent_pid, parent_thread_ptr->trdid, (uint32_t)tm_start );
  #endif
…
      // set remote child CPU context from parent_thread register values
+     // replicates the parent thread kernel stack to the child thread descriptor,
+     // and finally unblock the child thread.
      hal_cpu_context_fork( XPTR( child_cxy , child_thread_ptr ) );

      // From this point, both parent and child threads execute the following code,
-     // but they can be distinguished by the (CURRENT_THREAD,local_cxy) values.
-     // - parent unblock child, and return child PID to user application.
-     // - child thread does nothing, and return 0 to user application
-     // The child thread will only execute it when it is unblocked by parent thread.
+     // but child thread will only execute it after being unblocked by parent thread.
+     // They can be distinguished by the (CURRENT_THREAD,local_cxy) values.
+     // - parent return child PID to user application.
+     // - child return 0 to user application

      thread_t * current = CURRENT_THREAD;
…
  #endif

+     if( (current == parent_thread_ptr) && (local_cxy == parent_cxy) )   // parent thread
+     {
+
  #if DEBUG_SYS_FORK
  if( DEBUG_SYS_FORK < tm_end )
- printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
- __FUNCTION__, current->process->pid, current->trdid, (uint32_t)tm_end );
+ printk("\n[%s] parent thread[%x,%x] exit / child_pid %x / cycle %d\n",
+ __FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end );
  #endif

-     if( (current == parent_thread_ptr) && (local_cxy == parent_cxy) )   // parent thread
-     {
-         // parent_thread unblock child_thread
-         thread_unblock( XPTR( child_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL );
-
-         // only parent contribute to instrumentation
-
+         // only parent contribute to instrumentation
  #if CONFIG_INSTRUMENTATION_SYSCALLS
  hal_atomic_add( &syscalls_cumul_cost[SYS_FORK] , tm_end - tm_start );
…
      else   // child_thread
      {
+
+ #if DEBUG_SYS_FORK
+ if( DEBUG_SYS_FORK < tm_end )
+ printk("\n[%s] child thread[%x,%x] exit / child_pid %x / cycle %d\n",
+ __FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end );
+ #endif
+
          return 0;
      }
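The reworked tail of sys_fork() makes the parent/child split explicit: after hal_cpu_context_fork() both threads eventually run the same epilogue, and only the (CURRENT_THREAD, local_cxy) comparison tells them apart, the parent returning the child PID and the child returning 0. A rough user-space analogue with POSIX fork(), where the return value plays the role of that comparison (illustrative only, not the kernel code path):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main( void )
    {
        pid_t child_pid = fork();      /* both processes continue from here */

        if( child_pid < 0 )            /* fork failed */
        {
            perror( "fork" );
            exit( 1 );
        }
        else if( child_pid > 0 )       /* parent : gets the child PID */
        {
            printf( "[parent] created child %d\n" , (int)child_pid );
            waitpid( child_pid , NULL , 0 );
        }
        else                           /* child : gets 0 */
        {
            printf( "[child] running\n" );
        }
        return 0;
    }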
trunk/kernel/syscalls/sys_get_config.c
r624 r625
  * sys_get_config.c - get hardware platform parameters.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_special.h>
  #include <errno.h>
…
      process_t * process = this->process;

+ #if (DEBUG_SYS_GET_CONFIG || CONFIG_INSTRUMENTATION_SYSCALLS)
+ uint64_t    tm_start = hal_get_cycles();
+ #endif
+
  #if DEBUG_SYS_GET_CONFIG
- uint64_t    tm_start;
- uint64_t    tm_end;
  tm_start = hal_get_cycles();
  if( DEBUG_SYS_GET_CONFIG < tm_start )
…
  hal_fence();

+ #if (DEBUG_SYS_GET_CONFIG || CONFIG_INSTRUMENTATION_SYSCALLS)
+ uint64_t    tm_end = hal_get_cycles();
+ #endif
+
  #if DEBUG_SYS_GET_CONFIG
- tm_end = hal_get_cycles();
  if( DEBUG_SYS_GET_CONFIG < tm_end )
  printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n",
…
  #endif

+ #if CONFIG_INSTRUMENTATION_SYSCALLS
+ hal_atomic_add( &syscalls_cumul_cost[SYS_GET_CONFIG] , tm_end - tm_start );
+ hal_atomic_add( &syscalls_occurences[SYS_GET_CONFIG] , 1 );
+ #endif
+
  return 0;
trunk/kernel/syscalls/sys_get_core.c
r624 r625
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_special.h>
  #include <errno.h>
trunk/kernel/syscalls/sys_get_cycle.c
r624 r625
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_special.h>
  #include <errno.h>
trunk/kernel/syscalls/sys_is_fg.c
r624 r625
  * sys_fg.c - Kernel function implementing the "is_fg" system call.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_special.h>
  #include <errno.h>
trunk/kernel/syscalls/sys_mmap.c
r624 r625
  * sys_mmap.c - map files, memory or devices into process virtual address space
  *
- * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
- *           Alain Greiner (2016,2017,2018)
+ * Authors   Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_irqmask.h>
  #include <shared_syscalls.h>
trunk/kernel/syscalls/sys_munmap.c
r624 r625
  * sys_munmap.c - unmap a mapping from process virtual address space
  *
- * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
- *           Alain Greiner (2016,2017,2018)
+ * Authors   Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <hal_irqmask.h>
  #include <shared_syscalls.h>
trunk/kernel/syscalls/sys_mutex.c
r624 r625
  * sys_mutex.c - Access a POSIX mutex.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_special.h>
+ #include <hal_vmm.h>
  #include <errno.h>
  #include <thread.h>
…
      process_t * process = this->process;

+ #if (DEBUG_SYS_MUTEX || CONFIG_INSTRUMENTATION_SYSCALLS)
+ uint64_t    tm_start = hal_get_cycles();
+ #endif
+
  #if DEBUG_SYS_MUTEX
- uint64_t    tm_start;
- uint64_t    tm_end;
- tm_start = hal_get_cycles();
  if( DEBUG_SYS_MUTEX < tm_start )
- printk("\n[DBG] %s : thread %x in process %x enter for %s / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter for %s / cycle %d\n",
  __FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ), (uint32_t)tm_start );
  #endif
…
  hal_fence();

+ #if (DEBUG_SYS_MUTEX || CONFIG_INSTRUMENTATION_SYSCALLS)
+ uint64_t    tm_end = hal_get_cycles();
+ #endif
+
  #if DEBUG_SYS_MUTEX
- tm_end = hal_get_cycles();
- if( DEBUG_SYS_MUTEX < tm_start )
- printk("\n[DBG] %s : thread %x in process %x exit for %s / cost %d / cycle %d\n",
- __FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ),
- (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
+ if( DEBUG_SYS_MUTEX < tm_end )
+ printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n",
+ __FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ), (uint32_t)tm_end );
+ #endif
+
+ #if CONFIG_INSTRUMENTATION_SYSCALLS
+ hal_atomic_add( &syscalls_cumul_cost[SYS_MUTEX] , tm_end - tm_start );
+ hal_atomic_add( &syscalls_occurences[SYS_MUTEX] , 1 );
  #endif
trunk/kernel/syscalls/sys_open.c
r610 r625
  * sys_open.c - open a file.
  *
- * Author    Alain Greiner (2016,2017)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/syscalls/sys_opendir.c
r624 r625
  * sys_opendir.c - Open an user accessible VFS directory.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <thread.h>
  #include <process.h>
trunk/kernel/syscalls/sys_read.c
r624 r625
  /*
- * sys_read.c - read bytes from a file
+ * sys_read.c - Kernel function implementing the "read" system call.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <kernel_config.h>
  #include <hal_kernel_types.h>
+ #include <hal_vmm.h>
  #include <hal_uspace.h>
  #include <hal_irqmask.h>
trunk/kernel/syscalls/sys_readdir.c
r624 r625
  #include <hal_kernel_types.h>
  #include <hal_uspace.h>
+ #include <hal_vmm.h>
  #include <errno.h>
  #include <thread.h>
trunk/kernel/syscalls/sys_thread_exit.c
r619 r625
  uint64_t    tm_start = hal_get_cycles();
  if( DEBUG_SYS_THREAD_EXIT < tm_start )
- printk("\n[%s] thread[%x,%x] / main => delete process / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] is main => delete process / cycle %d\n",
  __FUNCTION__ , pid , trdid , (uint32_t)tm_start );
  #endif
…
  uint64_t    tm_start = hal_get_cycles();
  if( DEBUG_SYS_THREAD_EXIT < tm_start )
- printk("\n[%s] thread[%x,%x] / not main => delete thread / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] is not main => delete thread / cycle %d\n",
  __FUNCTION__ , pid , trdid , (uint32_t)tm_start );
  #endif
trunk/kernel/syscalls/sys_wait.c
r624 r625
  * sys_wait.c - wait termination or blocking of a child process.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  uint64_t    cycle = hal_get_cycles();
  if( DEBUG_SYS_WAIT < cycle )
- printk("\n[DBG] %s : thread %x in process %x enter / cycle %d\n",
- __FUNCTION__, this, process->pid, (uint32_t)cycle );
+ printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
+ __FUNCTION__, pid, this->trdid, (uint32_t)cycle );
  #endif
…
  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : status buffer %x unmapped for thread %x in process %x\n",
- __FUNCTION__ , (intptr_t)status, this->trdid , process->pid );
+ printk("\n[ERROR] in %s : status buffer %x unmapped for thread[%x,%x]\n",
+ __FUNCTION__ , (intptr_t)status, pid, this->trdid );
  hal_vmm_display( process , false );
  #endif
…
  #if DEBUG_SYSCALLS_ERROR
- printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n",
- __FUNCTION__ , trdid , owner_cxy );
+ printk("\n[ERROR] in %s : calling thread[%x,%x] is not thread 0 in owner cluster %x\n",
+ __FUNCTION__ , pid, this->trdid , owner_cxy );
  #endif
      this->errno = EINVAL;
…
          child_thread = hal_remote_lpt(XPTR( child_cxy , &child_ptr->th_tbl[0] ));

- #if (DEBUG_SYS_WAIT & 1)
- cycle = hal_get_cycles();
- if( DEBUG_SYS_WAIT < cycle )
- printk("\n[DBG] %s : thread %x in process %x check child %x / state %x\n",
- __FUNCTION__, this, process->pid, child_pid, child_state );
- #endif
          // test if this child process is terminated,
          // but termination not yet reported to parent process
…
  if( DEBUG_SYS_WAIT < cycle )
  {
      if( child_state & PROCESS_TERM_EXIT )
-     printk("\n[DBG] %s : thread %x in process %x exit / child %x exit / cycle %d\n",
-     __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
+     printk("\n[%s] thread[%x,%x] exit : child process %x terminated / cycle %d\n",
+     __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
      if( child_state & PROCESS_TERM_KILL )
-     printk("\n[DBG] %s : thread %x in process %x exit / child %x killed / cycle %d\n",
-     __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
+     printk("\n[%s] thread[%x,%x] exit : child process %x killed / cycle %d\n",
+     __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
      if( child_state & PROCESS_TERM_STOP )
-     printk("\n[DBG] %s : thread %x in process %x exit / child %x stopped / cycle %d\n",
-     __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
+     printk("\n[%s] thread[%x,%x] exit : child process %x stopped / cycle %d\n",
+     __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
  }
  #endif
…
      }  // end loop on children

-     // we execute this code when no child terminated:
+     // we execute this code when no child change detected
      // - release the lock protecting children list,
      // - block on the WAIT condition
…
  cycle = hal_get_cycles();
  if( DEBUG_SYS_WAIT < cycle )
- printk("\n[DBG] %s : thread %x in process %x block & deschedule / cycle %d\n",
- __FUNCTION__, this, process->pid, (uint32_t)cycle );
+ printk("\n[%s] thread[%x,%x] block & deschedule / cycle %d\n",
+ __FUNCTION__, pid, this->trdid, (uint32_t)cycle );
  #endif
…
  cycle = hal_get_cycles();
  if( DEBUG_SYS_WAIT < cycle )
- printk("\n[DBG] %s : thread %x in process %x unblock & resume / cycle %d\n",
- __FUNCTION__, this, process->pid, (uint32_t)cycle );
+ printk("\n[%s] thread[%x,%x] resume / cycle %d\n",
+ __FUNCTION__, pid, this->trdid, (uint32_t)cycle );
  #endif
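The control flow these traces decorate is a classic scan-and-block loop: the parent walks its children under a lock, and when no state change is found it releases the lock, blocks on the WAIT condition and deschedules, to be unblocked later by the child's sys_exit() (shown earlier in this changeset). A small pthreads sketch of that handshake, with a condition variable standing in for the kernel's WAIT blocking (all names below are stand-ins):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* toy "children list" state, protected by a lock as in the kernel */
    static pthread_mutex_t lock      = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
    static bool            child_terminated = false;

    /* what the child side does on exit : publish the state, wake the parent */
    static void * child( void * arg )
    {
        (void)arg;
        pthread_mutex_lock( &lock );
        child_terminated = true;              /* term_state update          */
        pthread_cond_signal( &wait_cond );    /* unblock parent main thread */
        pthread_mutex_unlock( &lock );
        return NULL;
    }

    int main( void )
    {
        pthread_t trdid;
        pthread_create( &trdid , NULL , child , NULL );

        pthread_mutex_lock( &lock );
        while( !child_terminated )                    /* scan found no change */
            pthread_cond_wait( &wait_cond , &lock );  /* block & deschedule   */
        pthread_mutex_unlock( &lock );

        printf( "child reported terminated\n" );
        pthread_join( trdid , NULL );
        return 0;
    }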
trunk/kernel/syscalls/sys_write.c
r624 r625
  * sys_write.c - Kernel function implementing the "write" system call.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #if DEBUG_SYS_WRITE
- tm_start = hal_get_cycles();
  if( DEBUG_SYS_WRITE < tm_start )
- printk("\n[%s] thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter / vaddr %x / %d bytes / cycle %d\n",
  __FUNCTION__, process->pid, this->trdid, vaddr, count, (uint32_t)tm_start );
  #endif
…
  hal_enable_irq( &save_sr );

      // action depend on file type
-     if( file_type == INODE_TYPE_FILE )       // write to file mapper
+     if( file_type == INODE_TYPE_FILE )       // write to a file mapper
      {
          // check file writable
…
          xptr_t inode_xp = XPTR( file_cxy , inode_ptr );
          vfs_inode_update_size( inode_xp , file_offset + count );
-
      }
      else if( file_type == INODE_TYPE_DEV )   // write to TXT device
trunk/libs/libalmosmkh/almosmkh.h
r623 r625
  * It can be called by any thread running in any cluster.
  ***************************************************************************************
+ * @ cxy      : [in]  target cluster identifier.
  * @ pid      : [in]  process identifier.
  * @ return 0 if success / return -1 if illegal argument.
trunk/libs/mini-libc/stdio.c
r624 r625
      // check stream valid
-     if( stream->key != VALID_OPEN_FILE ) return EOF;
+     if( stream->key != VALID_OPEN_FILE )
+     {
+         printf("\n[error in %s] stream %x non registered\n", __FUNCTION__, stream );
+         return -1;
+     }

      va_start( args, format );
…
      va_end( args );

+     // check format
      if ( count < 0 )
      {
-         display_string( "fprintf : xprintf failure" );
+         printf("\n[error in %s] unsupported format %s\n", __FUNCTION__, format );
          return -1;
      }
-     else
-     {
-         // get file descriptor from file pointer
-         fd = stream->fd;
-
-         // set terminating NUL
-         string[count] = 0;
-
-         printf("\n[%s] fd = %d for string : %s\n", __FUNCTION__, fd, string );
-
-         idbg();
-
-         // copy string to file
-         writen = write( fd , &string , count );
-
-         if( writen != count )
-         {
-             display_string( "fprintf : write failure" );
-             return -1;
-         }
-
-         idbg();
-
-         return writen;
-     }
+
+     // get file descriptor from file pointer
+     fd = stream->fd;
+
+     // set terminating NUL
+     string[count] = 0;
+
+     // copy string to file
+     writen = write( fd , &string , count );
+
+     // check write
+     if( writen != count )
+     {
+         printf("\n[error in %s] cannot write to stream %s\n", __FUNCTION__, stream );
+         return -1;
+     }
+
+     return writen;
+
  }  // end fprintf()
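The rewritten fprintf() now follows one straight path: validate the stream, format into a local buffer, NUL-terminate, and push the bytes to the underlying file descriptor with write(), failing loudly at each step. A self-contained sketch of that format-then-write structure, using standard vsnprintf() in place of the mini-libc xprintf() (my_fprintf is a hypothetical name, not the mini-libc entry point):

    #include <stdarg.h>
    #include <stdio.h>
    #include <unistd.h>

    /* minimal fprintf-over-write(), mirroring the rewritten control flow */
    static int my_fprintf( int fd , const char * format , ... )
    {
        char    string[4096];
        va_list args;

        va_start( args , format );
        int count = vsnprintf( string , sizeof(string) , format , args );
        va_end( args );

        if( count < 0 ) return -1;                 /* unsupported format */

        if( count >= (int)sizeof(string) )         /* output was truncated */
            count = (int)sizeof(string) - 1;

        int writen = (int)write( fd , string , (size_t)count );
        if( writen != count ) return -1;           /* short or failed write */

        return writen;
    }

    int main( void )
    {
        return ( my_fprintf( STDOUT_FILENO , "%s = %d\n" , "answer" , 42 ) < 0 );
    }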
trunk/libs/mini-libc/unistd.c
r589 r625
  * unistd.c - User level <unistd> library implementation.
  *
- * Author    Alain Greiner (2016,2017,2018)
+ * Author    Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/params-hard.mk
r624 r625
  Y_SIZE    = 2
  NB_PROCS  = 1
- NB_TTYS   = 3
+ NB_TTYS   = 2
  IOC_TYPE  = IOC_BDV
  TXT_TYPE  = TXT_TTY
trunk/user/init/init.c
r623 r625
  {
      // INIT display CHILD[i] process PID
-     snprintf( string , 64 , "[init] created KSH[%d] / pid = %x", i , ret_fork );
+     snprintf( string , 64 , "[init] (pid 0x1) created ksh[%d] (pid %x)", i , ret_fork );
      display_string( string );
trunk/user/ksh/ksh.c
r624 r625
  #define LOG_DEPTH      (32)     // max number of registered commands
  #define MAX_ARGS       (32)     // max number of arguments in a command
+ #define PATH_MAX_SIZE  (256)    // max number of characters in a pathname

  #define DEBUG_MAIN      0
  #define DEBUG_INTER     0
- #define DEBUG_PARSE     0
+ #define DEBUG_EXECUTE   0
  #define DEBUG_CMD_CAT   0
  #define DEBUG_CMD_CP    0
…
  ksh_cmd_t       command[];                  // array of supported commands
  log_entry_t     log_entries[LOG_DEPTH];     // array of registered commands
  unsigned int    ptw;                        // write pointer in log_entries[]
  unsigned int    ptr;                        // read pointer in log_entries[]
  pthread_attr_t  attr;                       // interactive thread attributes
  sem_t           semaphore;                  // block interactive thread when zero
  pthread_t       trdid;                      // interactive thread identifier
+ char            pathname[PATH_MAX_SIZE];    // pathname for a file
+ char            pathnew[PATH_MAX_SIZE];     // used by the rename command
…
  static void cmd_cat( int argc , char **argv )
  {
-     char         * path;
      struct stat    st;
      int            fd;
…
-     path = argv[1];
+     strcpy( pathname , argv[1] );

      // open the file
-     fd = open( path , O_RDONLY , 0 );
+     fd = open( pathname , O_RDONLY , 0 );
      if (fd < 0)
      {
-         printf(" error: cannot open file <%s>\n", path );
+         printf(" error: cannot open file <%s>\n", pathname );
          sem_post( &semaphore );
…
  #if DEBUG_CMD_CAT
- snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, path );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, pathname );
  display_string( string );
  #endif

      // get file stats
-     if ( stat( path , &st ) == -1)
+     if ( stat( pathname , &st ) == -1)
      {
-         printf(" error: cannot stat <%s>\n", path );
+         printf(" error: cannot stat <%s>\n", pathname );
          close(fd);
…
      if ( S_ISDIR(st.st_mode) )
      {
-         printf(" error: <%s> is a directory\n", path );
+         printf(" error: <%s> is a directory\n", pathname );
          close(fd);
…
      if( size == 0 )
      {
-         printf(" error: size = 0 for <%s>\n", path );
+         printf(" error: size = 0 for <%s>\n", pathname );
          close(fd);
…
      if ( buf == NULL )
      {
-         printf(" error: cannot map file <%s>\n", path );
+         printf(" error: cannot map file <%s>\n", pathname );
          close(fd);
…
  snprintf( string , 64 , "[ksh] %s : maped file %d to buffer %x", __FUNCTION__, fd , buf );
  display_string( string );
- // unsigned int pid = getpid();
- // unsigned int cxy = pid >> 16;
- // display_vmm( cxy , pid );
  #endif
…
      if( munmap( buf , size ) )
      {
-         printf(" error: cannot unmap file <%s>\n", path );
+         printf(" error: cannot unmap file <%s>\n", pathname );
      }
…
  snprintf( string , 64 , "[ksh] %s : unmaped file %d from buffer %x", __FUNCTION__, fd , buf );
  display_string( string );
- // display_vmm( cxy , pid );
  #endif
…
      if( close( fd ) )
      {
-         printf(" error: cannot close file <%s>\n", path );
+         printf(" error: cannot close file <%s>\n", pathname );
      }
…
  static void cmd_cd( int argc , char **argv )
  {
-     char * path;
-
      if (argc != 2)
      {
…
      else
      {
-         path = argv[1];
+         strcpy( pathname , argv[1] );

          // call the relevant syscall
-         if( chdir( path ) )
+         if( chdir( pathname ) )
          {
-             printf(" error: cannot found <%s> directory\n", path );
+             printf(" error: cannot found <%s> directory\n", pathname );
          }
      }
…
      int            src_fd;
      int            dst_fd;
-     char         * srcpath;
-     char         * dstpath;
      int            size;       // source file size
      int            bytes;      // number of transfered bytes
…
-     srcpath = argv[1];
-     dstpath = argv[2];
-
      // open the src file
-     src_fd = open( srcpath , O_RDONLY , 0 );
+     strcpy( pathname , argv[1] );
+     src_fd = open( pathname , O_RDONLY , 0 );

      if ( src_fd < 0 )
      {
          dst_fd = -1;
-         printf(" error: cannot open <%s>\n", srcpath );
+         printf(" error: cannot open <%s>\n", argv[1] );
          goto cmd_cp_exit;
      }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, srcpath );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, argv[1] );
  display_string( string );
  #endif

      // get file stats
-     if ( stat( srcpath , &st ) )
+     if ( stat( pathname , &st ) )
      {
          dst_fd = -1;
-         printf(" error: cannot stat <%s>\n", srcpath );
+         printf(" error: cannot stat <%s>\n", argv[1] );
          goto cmd_cp_exit;
      }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, srcpath );
+ snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, argv[1] );
  display_string( string );
  #endif
…
      {
          dst_fd = -1;
-         printf(" error: <%s> is a directory\n", srcpath );
+         printf(" error: <%s> is a directory\n", argv[1] );
          goto cmd_cp_exit;
      }
…
      // open the dst file
-     dst_fd = open( dstpath , O_CREAT|O_TRUNC|O_RDWR , 0 );
+     strcpy( pathname , argv[2] );
+     dst_fd = open( pathname , O_CREAT|O_TRUNC|O_RDWR , 0 );

      if ( dst_fd < 0 )
      {
-         printf(" error: cannot open <%s>\n", dstpath );
+         printf(" error: cannot open <%s>\n", argv[2] );
          goto cmd_cp_exit;
      }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, dstpath );
+ snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, argv[2] );
  display_string( string );
  #endif

-     if ( stat( dstpath , &st ) )
+     if ( stat( pathname , &st ) )
      {
-         printf(" error: cannot stat <%s>\n", dstpath );
+         printf(" error: cannot stat <%s>\n", argv[2] );
          goto cmd_cp_exit;
      }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, dstpath );
+ snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, argv[2] );
  display_string( string );
  #endif

      if ( S_ISDIR(st.st_mode ) )
      {
-         printf(" error: <%s> is a directory\n", dstpath );
+         printf(" error: <%s> is a directory\n", argv[2] );
          goto cmd_cp_exit;
      }
…
          if ( read( src_fd , buf , len ) != len )
          {
-             printf(" error: cannot read from file <%s>\n", srcpath );
+             printf(" error: cannot read from file <%s>\n", argv[1] );
              goto cmd_cp_exit;
          }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
+ snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, argv[1] );
  display_string( string );
  #endif
…
          if ( write( dst_fd , buf , len ) != len )
          {
-             printf(" error: cannot write to file <%s>\n", dstpath );
+             printf(" error: cannot write to file <%s>\n", argv[2] );
              goto cmd_cp_exit;
          }

  #if DEBUG_CMD_CP
- snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
+ snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, argv[2] );
  display_string( string );
  #endif
…
      int            ret_exec;     // return value from exec
      unsigned int   ksh_pid;      // KSH process PID
-     char         * pathname;     // path to .elf file
      unsigned int   background;   // background execution if non zero
      unsigned int   placement;    // placement specified if non zero
…
      else
      {
-         pathname = argv[1];
+         strcpy( pathname , argv[1] );

          if( argc == 2 )
…
-         /*
-         // take semaphore to block the interactive thread
-         if ( sem_wait( &semaphore ) )
-         {
-             printf("\n[ksh error] cannot found semafore\n" );
-             exit( 1 );
-         }
-         */
          // get KSH process PID
          ksh_pid = getpid();
…
  display_string( string );
  #endif
+         // when the new process is launched in background, the KSH process
+         // takes the TXT ownership, and release the semaphore to get the next command.
+         // Otherwise, the child process keep the TXT ownership, and the semaphore will
+         // be released by the KSH main thread when the child process exit

          if( background )       // KSH must keep TXT ownership
…
              sem_post( &semaphore );
          }
-         else                   // KSH loosed TXT ownership
-         {
-             // semaphore will be released by the KSH main thread
-             // when the loaded process exit
-         }
      }
  }
…
  static void cmd_ls( int argc , char **argv )
  {
-     char           * pathname = NULL;
      struct dirent  * entry;
      DIR            * dir;
…
      // get target directory path
      if ( argc == 1 )   strcpy( pathname , "." );
-     else               pathname = argv[1];
+     else               strcpy( pathname , argv[1] );

      // open target directory
…
  static void cmd_mkdir( int argc , char **argv )
  {
-     char * pathname;
-
      if (argc != 2)
      {
…
      else
      {
-         pathname = argv[1];
+         strcpy( pathname , argv[1] );

          mkdir( pathname , 0x777 );
…
  static void cmd_mv( int argc , char **argv )
  {
-     char * old_path;
-     char * new_path;
-
      if (argc != 3)
      {
…
      else
      {
-         old_path = argv[1];
-         new_path = argv[2];
+         strcpy( pathname , argv[1] );
+         strcpy( pathnew , argv[2] );

          // call the relevant syscall
-         if( rename( old_path , new_path ) )
+         if( rename( pathname , pathnew ) )
          {
-             printf(" error: unable to rename <%s> to <%s>\n", old_path, new_path );
+             printf(" error: unable to rename <%s> to <%s>\n", pathname , pathnew );
          }
      }
…
  static void cmd_pwd( int argc , char **argv )
  {
-     char buf[1024];
-
      if (argc != 1)
      {
…
      else
      {
-         if ( getcwd( buf , 1024 ) )
+         if ( getcwd( pathname , PATH_MAX_SIZE ) )
          {
              printf(" error: unable to get current directory\n");
…
          else
          {
-             printf("%s\n", buf );
+             printf("%s\n", pathname );
          }
      }
…
  static void cmd_rm( int argc , char **argv )
  {
-     char * pathname;
-
      if (argc != 2)
      {
…
      else
      {
-         pathname = argv[1];
+         strcpy( pathname , argv[1] );

          if ( unlink( pathname ) )
…
  {
      // same as cmd_rm()
-     cmd_rm (argc, argv);
+     cmd_rm (argc , argv );
  }
…
  // This function analyses one command (with arguments), executes it, and returns.
  ////////////////////////////////////////////////////////////////////////////////////
- static void __attribute__ ((noinline)) parse( char * buf )
+ static void __attribute__ ((noinline)) execute( char * buf )
  {
      int argc = 0;
…
      int len = strlen(buf);

- #if DEBUG_PARSE
- char string[64];
- snprintf( string , 64 , "\n[ksh] %s : <%s>", __FUNCTION__ , buf );
- display_string( string );
+ #if DEBUG_EXECUTE
+ printf("\n[ksh] %s : command <%s>\n",
+ __FUNCTION__ , buf );
  #endif
…
      }

- #if DEBUG_PARSE
- snprintf( string , 64 , "\n[ksh] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
- display_string( string );
- #endif
-
-     // analyse command type
-     if (argc > 0)
-     {
-         int found = 0;
-
-         // try to match typed command
-         for ( i = 0 ; command[i].name ; i++ )
-         {
-             if (strcmp(argv[0], command[i].name) == 0)
-             {
-                 command[i].fn(argc, argv);
-                 found = 1;
-                 break;
-             }
-         }
-
-         if (!found)    // undefined command
-         {
-             printf(" error : undefined command <%s>\n", argv[0]);
-
-             // release semaphore to get next command
-             sem_post( &semaphore );
-         }
-     }
- }  // end parse()
+     // check command
+     if (argc == 0)
+     {
+         // release semaphore to get next command
+         sem_post( &semaphore );
+     }
+
+ #if DEBUG_EXECUTE
+ printf("\n[ksh] %s : argc %d / arg0 %s / arg1 %s\n",
+ __FUNCTION__ , argc , argv[0], argv[1] );
+ #endif
+
+     // scan the list of commands to match typed command
+     int found = 0;
+     for ( i = 0 ; (command[i].name != NULL) && (found == 0) ; i++ )
+     {
+         if (strcmp(argv[0], command[i].name) == 0)
+         {
+             command[i].fn(argc, argv);
+             found = 1;
+         }
+     }
+
+     // check undefined command
+     if (!found)
+     {
+         printf(" error : undefined command <%s>\n", argv[0]);
+
+         // release semaphore to get next command
+         sem_post( &semaphore );
+     }
+ }  // end execute()
…
  #if DEBUG_INTER
- char string[64];
+ char string[128];
  #endif

- /* To lauch one command without interactive mode
+ /*
+ // To lauch one or several commands without interactive mode
+
+ // 1. first command
  if( sem_wait( &semaphore ) )
  {
…
  else
  {
-     printf("\n[ksh] load bin/user/sort.elf\n");
+     printf("\n[ksh] load bin/user/pgcd.elf\n");
  }

- strcpy( cmd , "load bin/user/sort.elf" );
- parse( cmd );
+ strcpy( cmd , "load bin/user/pgcd.elf" );
+ execute( cmd );
+
+ // 2. second command
+ if( sem_wait( &semaphore ) )
+ {
+     printf("\n[ksh error] cannot found semafore\n" );
+     exit( 1 );
+ }
+ else
+ {
+     printf("\n[ksh] ls home\n");
+ }
+
+ strcpy( cmd , "ls home" );
+ execute( cmd );
+
+ // end non-interactive mode
  */
…
      {
          // initialize command buffer
-         memset( cmd , 0x20 , sizeof(cmd) );   // TODO useful ?
+         // memset( cmd , 0x20 , sizeof(cmd) );   // TODO useful ?
          count = 0;
          state = NORMAL;
…
  #if DEBUG_INTER
  unsigned int pid = getpid();
- snprintf( string , 64 , "\n[ksh] %s : request a new command", __FUNCTION__ );
+ snprintf( string , 128 , "\n[ksh] %s : request a new command", __FUNCTION__ );
  display_string( string );
  #endif
…
                  cmd[count] = 0;
                  count++;
-
-                 // register command in log
+ #if DEBUG_INTER
+ snprintf( string , 128 , "[ksh] %s : get command <%s> / &log = %x / ptw = %d / &ptw = %x",
+ __FUNCTION__, cmd , log_entries[ptw].buf , ptw , &ptw );
+ display_string( string );
+ display_vmm( 0 , 2 );
+ #endif
+                 // register command in log_entries[] array
                  strncpy( log_entries[ptw].buf , cmd , count );
                  log_entries[ptw].count = count;
…
  #if DEBUG_INTER
- snprintf( string , 64 , "[ksh] %s : parse and execute <%s>", __FUNCTION__, cmd );
+ snprintf( string , 128 , "[ksh] %s : execute <%s>", __FUNCTION__, cmd );
  display_string( string );
  #endif
…
                  putchar( c );

-                 // call parser to analyse and execute command
-                 parse( cmd );
+                 // execute command
+                 execute( cmd );
              }
              else    // no command registered
…
  #if DEBUG_INTER
- snprintf( string , 64 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
+ snprintf( string , 128 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
  display_string( string );
  #endif
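Behind the load command changes sits the one-token handshake the comments describe: the interactive thread may only prompt for a new command after taking the semaphore, and whoever finishes the previous command (the command handler itself, or the KSH main thread once a foreground child exits) posts it back. A compact POSIX sketch of that prompt-gating pattern (the script array and messages are invented for the example):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t semaphore;   /* one token : "the next prompt may be displayed" */

    /* stand-in for execute() : handle one command, then return the token */
    static void execute( const char * cmd )
    {
        printf( "[ksh] executing <%s>\n" , cmd );
        sem_post( &semaphore );                /* allow the next prompt */
    }

    /* stand-in for the interactive thread : take the token before each command */
    static void * interactive( void * arg )
    {
        (void)arg;
        const char * script[] = { "ls home" , "pwd" , NULL };

        for( int i = 0 ; script[i] != NULL ; i++ )
        {
            sem_wait( &semaphore );            /* block until previous command done */
            execute( script[i] );
        }
        return NULL;
    }

    int main( void )
    {
        pthread_t trdid;

        sem_init( &semaphore , 0 , 1 );        /* token initially available */
        pthread_create( &trdid , NULL , interactive , NULL );
        pthread_join( trdid , NULL );
        return 0;
    }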
trunk/user/pgcd/pgcd.c
r580 r625
      get_core( &cxy , &lid );

-     printf( "\n\n[PGCD] starts on core[%x,%d] / cycle %d\n",
+     printf( "\n\n[pgcd] starts on core[%x,%d] / cycle %d\n",
              cxy , lid , (unsigned int)cycle );
trunk/user/sort/sort.c
r624 r625
  #include <hal_macros.h>

- #define ARRAY_LENGTH        1024          // number of items
+ #define ARRAY_LENGTH        256           // number of items
  #define MAX_THREADS         1024          // 16 * 16 * 4
…
  #endif

  #if CHECK_RESULT

      int   success = 1;
      int * res_array = ( (total_threads == 2)   ||
                          (total_threads == 8)   ||
                          (total_threads == 32)  ||
                          (total_threads == 128) ||
                          (total_threads == 512) ) ? array1 : array0;

      for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
      {
          if ( res_array[n] > res_array[n+1] )
          {
              printf("\n[sort] array[%d] = %d > array[%d] = %d\n",
              n , res_array[n] , n+1 , res_array[n+1] );
              success = 0;
              break;
          }
      }

      if ( success ) printf("\n[sort] success\n");
      else           printf("\n[sort] failure\n");

  #endif

  #if INSTRUMENTATION

      char name[64];
      char path[128];

      // build a file name from n_items / n_clusters / n_cores
      if( USE_DQT_BARRIER ) snprintf( name , 64 , "sort_dqt_%d_%d_%d",
                            ARRAY_LENGTH, x_size * y_size, ncores );
      else                  snprintf( name , 64 , "sort_smp_%d_%d_%d",
                            ARRAY_LENGTH, x_size * y_size, ncores );

      // build file pathname
      snprintf( path , 128 , "home/%s" , name );

      // compute results
      unsigned int sequencial = (unsigned int)(seq_end_cycle - start_cycle);
      unsigned int parallel   = (unsigned int)(para_end_cycle - seq_end_cycle);

      // display results on process terminal
      printf("\n----- %s -----\n"
             " - sequencial : %d cycles\n"
             " - parallel   : %d cycles\n",
             name, sequencial, parallel );

      // open file
      FILE * stream = fopen( path , NULL );
      if( stream == NULL )
      {
          printf("\n[sort error] cannot open instrumentation file <%s>\n", name );
          exit(0);
      }

+     printf("\n[sort] file %s successfully open\n", path);

      // register results to file
      int ret = fprintf( stream , "\n----- %s -----\n"
                         " - sequencial : %d cycles\n"
                         " - parallel   : %d cycles\n", name, sequencial, parallel );
      if( ret < 0 )
      {
          printf("\n[sort error] cannot write to instrumentation file <%s>\n", name );
          exit(0);
      }

+     printf("\n[sort] file %s successfully written\n", path);

      // close instrumentation file
      if( fclose( stream ) )
      {
-         printf("\n[sort error] cannot close instrumentation file <%s>\n", name );
+         printf("\n[sort error] cannot close the file <%s>\n", name );
          exit(0);
      }

+     printf("\n[sort] file %s successfully closed\n", path);

  #endif
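The instrumentation block added to sort.c is a plain open / format / write / close sequence against a per-configuration file under home/. A standalone sketch of the same sequence with standard C stdio; note the source passes NULL as the fopen() mode, which the almos-mkh mini-libc tolerates, while standard C requires a mode string (the cycle counts and run parameters below are placeholders):

    #include <stdio.h>
    #include <stdlib.h>

    int main( void )
    {
        char         name[64];
        char         path[128];
        unsigned int sequencial = 1000;   /* placeholder cycle counts */
        unsigned int parallel   = 250;

        /* build a file name from the run parameters, as sort.c does */
        snprintf( name , 64  , "sort_smp_%d_%d_%d" , 256 , 4 , 2 );
        snprintf( path , 128 , "home/%s" , name );

        FILE * stream = fopen( path , "w" );   /* standard C wants a mode string */
        if( stream == NULL )
        {
            printf( "\n[sort error] cannot open instrumentation file <%s>\n" , name );
            exit( 0 );
        }

        /* register results to file, checking the fprintf() return value */
        if( fprintf( stream , "\n----- %s -----\n"
                              " - sequencial : %u cycles\n"
                              " - parallel   : %u cycles\n" ,
                              name , sequencial , parallel ) < 0 )
        {
            printf( "\n[sort error] cannot write to instrumentation file <%s>\n" , name );
            exit( 0 );
        }

        fclose( stream );
        return 0;
    }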