Changeset 629 for trunk/kernel
- Timestamp: May 17, 2019, 9:27:04 AM
- Location: trunk/kernel
- Files: 20 edited
trunk/kernel/fs/fatfs.h
(r628 → r629)

  * WARNING 2 : Most fields are constant values, but the <free_cluster_hint>,
- * <free_clusters>, <dirty_page_min>, <dirty_page_max>, <lock>, and the <fs_info_buffer>
- * are shared variables, that can be modified by any thread running in any cluster.
- * The <fs_info_buffer> contains a copy of the FS_INFO sector, and is only allocated in
- * the FAT cluster (i.e. in cluster 0). It is used by all to synchronously update the
- * free clusters info on IOC device.
+ * <free_clusters>, <lock>, and the <fs_info_buffer> are shared variables,
+ * that can be modified by any thread running in any cluster. The <fs_info_buffer>
+ * contains a copy of the FS_INFO sector, and is only allocated in the FAT cluster
+ * (cluster 0). It is used to synchronously update the free clusters info on IOC device.
  * => For all these variables, only the values stored in the FAT cluster must be used.
  ****************************************************************************************/
...
  /* shared variables (only the copy in FAT cluster must be used) */
- uint32_t dirty_page_min;     /*! min dirty page index in FAT mapper  */
- uint32_t dirty_page_max;     /*! max dirty page index in FAT mapper  */
  uint32_t free_cluster_hint;  /*! cluster[hint+1] is the first free   */
  uint32_t free_clusters;      /*! free clusters number                */
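Since only the copies stored in the FAT cluster are authoritative, any thread must read these shared fields through a remote access to cluster 0. A minimal sketch of that access pattern, assuming <fat_ctx_ptr> is the local pointer on the FATFS context copy residing in the FAT cluster (the function name and the fatfs_ctx_t type name are illustrative; XPTR() and hal_remote_l32() are the primitives used throughout this changeset):

// hedged sketch: reading the shared <free_clusters> field from the FAT cluster,
// as the WARNING above requires; copies in other clusters must not be trusted
uint32_t fatfs_read_free_clusters( fatfs_ctx_t * fat_ctx_ptr )
{
    // build an extended pointer on the field in the FAT cluster (cluster 0)
    xptr_t xp = XPTR( 0 , &fat_ctx_ptr->free_clusters );

    // remote 32-bit read
    return hal_remote_l32( xp );
}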
trunk/kernel/fs/vfs.c
(r628 → r629)

 #if (DEBUG_VFS_LOOKUP & 1)
 if( DEBUG_VFS_LOOKUP < cycle )
-printk("\n[%s] thread[%x,%x] created missing inode <%s> in cluster %x\n",
+printk("\n[%s] thread[%x,%x] created missing inode for <%s> in cluster %x\n",
 __FUNCTION__, process->pid, this->trdid, name, child_cxy );
 #endif
...
 // when the missing dentry is not in the parent mapper,
-// it is a new dentry that must be registered in parent directory mapper
+// a new dentry must be registered in parent directory mapper
 if ( error )
 {
...
 // 1. allocate one free cluster in file system to child inode,
-// and update the File Allocation Table in both the TAF mapper and IOC device.
+// and update the File Allocation Table in both the FAT mapper and IOC device.
 // It depends on the child inode FS type.
 vfs_ctx_t * ctx = hal_remote_lpt( XPTR( child_cxy , &child_ptr->ctx ) );
trunk/kernel/kern/kernel_init.c
(r628 → r629)

 "VFS_FILE",    // 33
 "VMM_VSL",     // 34
-"VMM_GPT",     // 35
-"VFS_MAIN",    // 36
-"FATFS_FAT",   // 37
+"VFS_MAIN",    // 35
+"FATFS_FAT",   // 36
 };
trunk/kernel/kern/process.c
(r626 → r629)

-// initialize GPT and VSL locks
-remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
+// initialize VSL locks
 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
...
 // initialize GPT and VSL locks
-remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
...
 // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
-// this includes all par net process copies in all clusters
+// this includes all parent process copies in all clusters
 if( parent_process_cxy == local_cxy )   // reference is local
 {
...
 // initialize VSL and GPT locks
-remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
-remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
+remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
 
 // create kernel vsegs in GPT and VSL, as required by the hardware architecture
trunk/kernel/kern/rpc.c
(r628 → r629)

 "VFS_FILE_CREATE",      // 14
 "VFS_FILE_DESTROY",     // 15
-"VFS_FS_GET_DENTRY",    // 16
+"VFS_FS_NEW_DENTRY",    // 16
 "VFS_FS_ADD_DENTRY",    // 17
 "VFS_FS_REMOVE_DENTRY", // 18
trunk/kernel/kern/scheduler.c
(r625 → r629)

 // @ returns pointer on selected thread descriptor
 ////////////////////////////////////////////////////////////////////////////////////////////
-thread_t * sched_select( scheduler_t * sched )
+static thread_t * sched_select( scheduler_t * sched )
 {
 thread_t * thread;
...
 uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
-printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / %d threads / cycle %d\n",
+printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted (still %d threads) / cycle %d\n",
 __FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, count, cycle );
+#endif
+
+#if CONFIG_INSTRUMENTATION_PGFAULTS
+uint32_t local_nr    = thread->info.local_pgfault_nr;
+uint32_t local_cost  = (local_nr  == 0) ? 0 : (thread->info.local_pgfault_cost  / local_nr);
+uint32_t global_nr   = thread->info.global_pgfault_nr;
+uint32_t global_cost = (global_nr == 0) ? 0 : (thread->info.global_pgfault_cost / global_nr);
+uint32_t false_nr    = thread->info.false_pgfault_nr;
+uint32_t false_cost  = (false_nr  == 0) ? 0 : (thread->info.false_pgfault_cost  / false_nr);
+printk("***** page faults for thread[%x,%x]\n"
+       " - %d local  : %d cycles\n"
+       " - %d global : %d cycles\n"
+       " - %d false  : %d cycles\n",
+       process->pid, thread->trdid,
+       local_nr,  local_cost,
+       global_nr, global_cost,
+       false_nr,  false_cost );
 #endif
 // destroy process descriptor if last thread
...
 #if (DEBUG_SCHED_YIELD & 0x1)
-// if( sched->trace )
-if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
+if( sched->trace )
 sched_display( lid );
 #endif
...
 #if DEBUG_SCHED_YIELD
-// if( sched->trace )
-if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
+if( sched->trace )
 printk("\n[%s] core[%x,%d] / cause = %s\n"
 "     thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
...
 #if (DEBUG_SCHED_YIELD & 1)
-// if( sched->trace )
-if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
+if( sched->trace )
 printk("\n[%s] core[%x,%d] / cause = %s\n"
 "     thread %x (%s) (%x,%x) continue / cycle %d\n",
trunk/kernel/kern/thread.c
(r625 → r629)

 // update target process instrumentation counter
-process->vmm.pgfault_nr += thread->info.pgfault_nr;
+// process->vmm.pgfault_nr += thread->info.pgfault_nr;
 
 // remove thread from process th_tbl[]
trunk/kernel/kern/thread.h
(r625 → r629)

 typedef struct thread_info_s
 {
-    uint32_t pgfault_nr;          /*! cumulated number of page faults      */
-    cycle_t  last_cycle;          /*! last cycle counter value (date)      */
-    cycle_t  usr_cycles;          /*! user execution duration (cycles)     */
-    cycle_t  sys_cycles;          /*! system execution duration (cycles)   */
+    uint32_t false_pgfault_nr;    /*! number of false page faults          */
+    uint32_t false_pgfault_cost;  /*! cumulated cost                       */
+    uint32_t local_pgfault_nr;    /*! number of local page faults          */
+    uint32_t local_pgfault_cost;  /*! cumulated cost                       */
+    uint32_t global_pgfault_nr;   /*! number of global page faults         */
+    uint32_t global_pgfault_cost; /*! cumulated cost                       */
+
+    cycle_t  last_cycle;          /*! last cycle counter value (date)      */
+    cycle_t  usr_cycles;          /*! user execution duration (cycles)     */
+    cycle_t  sys_cycles;          /*! system execution duration (cycles)   */
 }
 thread_info_t;
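The scheduler dump above averages these counters at thread deletion. They are fed by the fault handler using the start/end-cycle pattern that vmm_handle_page_fault() adopts later in this changeset; the fragment below shows the exact idiom for the local-fault case (the handler body between the two samples is elided):

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t start_cycle = (uint32_t)hal_get_cycles();    // sampled at handler entry
#endif

/* ... handle one local page fault ... */

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t end_cycle = (uint32_t)hal_get_cycles();      // sampled after the PTE is set
this->info.local_pgfault_nr++;                        // one more local fault
this->info.local_pgfault_cost += (end_cycle - start_cycle);  // cumulated cost in cycles
#endif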
trunk/kernel/kernel_config.h
(r628 → r629)

 #define _KERNEL_CONFIG_H_
 
-#define CONFIG_ALMOS_VERSION            "Version 2.0 / April 2019"
+#define CONFIG_ALMOS_VERSION            "Version 2.1 / May 2019"
...
 #define DEBUG_BUSYLOCK                  0
-#define DEBUG_BUSYLOCK_PID              0x10001   // for busylock detailed debug
-#define DEBUG_BUSYLOCK_TRDID            0x10000   // for busylock detailed debug
+#define DEBUG_BUSYLOCK_PID              0         // for busylock detailed debug
+#define DEBUG_BUSYLOCK_TRDID            0         // for busylock detailed debug
 
 #define DEBUG_CHDEV_CMD_RX              0
...
 #define DEBUG_PROCESS_ZERO_CREATE       0
 
-#define DEBUG_QUEUELOCK_TYPE            0    // lock type (0 : undefined / 1000 : all types)
+#define DEBUG_QUEUELOCK_TYPE            0    // lock type 0 is undefined => no debug
+#define DEBUG_QUEUELOCK_PTR             0
+#define DEBUG_QUEUELOCK_CXY             0
 
 #define DEBUG_RPC_CLIENT_GENERIC        0
...
 #define DEBUG_RPC_VMM_DELETE_VSEG       0
 
-#define DEBUG_RWLOCK_TYPE               0    // lock type (0 : undefined / 1000 : all types)
+#define DEBUG_RWLOCK_TYPE               35   // lock type 0 is undefined => no debug
+#define DEBUG_RWLOCK_PTR                0xb1650
+#define DEBUG_RWLOCK_CXY                0x11
 
 #define DEBUG_SCHED_HANDLE_SIGNALS      2
...
 #define LOCK_VFS_FILE    33   // remote (RW) protect file descriptor state
 #define LOCK_VMM_VSL     34   // remote (RW) protect VSL (local list of vsegs)
-#define LOCK_VMM_GPT     35   // remote (RW) protect GPT (local page table)
-#define LOCK_VFS_MAIN    36   // remote (RW) protect vfs traversal (in root inode)
-#define LOCK_FATFS_FAT   37   // remote (RW) protect exclusive access to the FATFS FAT
+#define LOCK_VFS_MAIN    35   // remote (RW) protect vfs traversal (in root inode)
+#define LOCK_FATFS_FAT   36   // remote (RW) protect exclusive access to the FATFS FAT
...
-#define CONFIG_INTRUMENTATION_SYSCALLS  0
+#define CONFIG_INSTRUMENTATION_SYSCALLS 0
+#define CONFIG_INSTRUMENTATION_PGFAULTS 1
trunk/kernel/libk/queuelock.c
(r623 → r629)

- * Authors Alain Greiner (2016,2017,2018)
+ * Authors Alain Greiner (2016,2017,2018,2019)

The same change is applied at every debug site (lock init, BLOCK, ACQUIRE, RELEASE, and the UNBLOCK message): the single type test is replaced by a three-way filter on type, pointer, and cluster. At the init site:

 #if DEBUG_QUEUELOCK_TYPE
 thread_t * this = CURRENT_THREAD;
-if( DEBUG_QUEUELOCK_TYPE == type )
+if( (type      == DEBUG_QUEUELOCK_TYPE) &&
+    (lock      == DEBUG_QUEUELOCK_PTR ) &&
+    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
 printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
 __FUNCTION__, this->process->pid, this->trdid,

and at the other sites:

-if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
+if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
+    (lock      == DEBUG_QUEUELOCK_PTR ) &&
+    (local_cxy == DEBUG_QUEUELOCK_CXY ) )
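All four lock families touched by this changeset (queuelock, remote_queuelock, rwlock, remote_rwlock) now repeat this same three-way filter at every debug site. A hypothetical helper macro condensing it (not part of the changeset; the DEBUG_QUEUELOCK_* symbols are the real kernel_config.h knobs):

// hypothetical convenience macro: true only for the single lock instance selected
// in kernel_config.h by its type, its pointer, and its cluster identifier
#define QUEUELOCK_DEBUG_SELECTED( type , ptr , cxy )       \
    ( ((type)            == DEBUG_QUEUELOCK_TYPE) &&       \
      ((intptr_t)(ptr)   == DEBUG_QUEUELOCK_PTR ) &&       \
      ((cxy)             == DEBUG_QUEUELOCK_CXY ) )

Compared with the old `|| (DEBUG_QUEUELOCK_TYPE == 1000)` wildcard, this trades the "trace all locks" mode for the ability to follow one contended lock instance through its whole init/acquire/release life cycle.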
trunk/kernel/libk/remote_barrier.c
(r623 → r629)

 if( cycle > DEBUG_BARRIER_WAIT )
 printk("\n[%s] thread[%x,%x] exit / barrier (%x,%x) / cycle %d\n",
-__FUNCTION__, this->trdid, this->process->pid, barrier_cxy, barrier_ptr, cycle );
+__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
trunk/kernel/libk/remote_queuelock.c
(r623 → r629)

- * Authors Alain Greiner (2016,2017,2018)
+ * Authors Alain Greiner (2016,2017,2018,2019)

As in queuelock.c, every debug site (init, BLOCK, ACQUIRE, RELEASE, UNBLOCK) replaces the single type test by the three-way filter, here on the remote pointer and cluster:

-if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
+if( (lock_type == DEBUG_QUEUELOCK_TYPE) &&
+    (lock_ptr  == DEBUG_QUEUELOCK_PTR ) &&
+    (lock_cxy  == DEBUG_QUEUELOCK_CXY ) )
trunk/kernel/libk/remote_rwlock.c
(r627 → r629)

Every debug site (init, READ/WRITE BLOCK, READ/WRITE ACQUIRE, READ/WRITE RELEASE, and the UNBLOCK messages) replaces the single type test by the same three-way filter:

-if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
+if( (lock_type          == DEBUG_RWLOCK_TYPE) &&
+    ((intptr_t)lock_ptr == DEBUG_RWLOCK_PTR ) &&
+    (lock_cxy           == DEBUG_RWLOCK_CXY ) )

One READ ACQUIRE message is also made consistent with the others:

-printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n",
+printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
trunk/kernel/libk/remote_rwlock.h
(r627 → r629)

  * taken, or if the number of readers is non zero, it registers in the "wr_root" waiting
  * queue, blocks, and deschedules. It set "taken" otherwise.
- * - when a reader completes its access, it decrement the readers "count", unblock the
+ * - when a reader completes its access, it decrement the readers "count", unblock
  * the first waiting writer if there is no other readers, and unblock all waiting
  * readers if there no write request.
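The release policy stated by this comment can be modelled in a few lines. The toy below is a self-contained sketch of the policy only, not the kernel code (the real implementation walks xlist waiting queues through remote pointers):

#include <stdio.h>

// toy model of the reader-release policy described in the comment above
typedef struct { int count; int waiting_writers; int waiting_readers; } toy_rwlock_t;

static void toy_rd_release( toy_rwlock_t * lock )
{
    lock->count--;                                        // one less active reader
    if( (lock->count == 0) && (lock->waiting_writers > 0) )
    {
        lock->waiting_writers--;                          // unblock the first waiting writer
        printf("wake one writer\n");
    }
    else if( lock->waiting_writers == 0 )
    {
        printf("wake %d readers\n", lock->waiting_readers);
        lock->waiting_readers = 0;                        // unblock all waiting readers
    }
}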
trunk/kernel/libk/rwlock.c
(r623 → r629)

- * Author Alain Greiner (2016,2017,2018)
+ * Author Alain Greiner (2016,2017,2018,2019)

As in remote_rwlock.c, every debug site replaces the single type test by the three-way filter, here on the local pointer and local_cxy:

-if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
+if( (lock_type      == DEBUG_RWLOCK_TYPE) &&
+    ((intptr_t)lock == DEBUG_RWLOCK_PTR ) &&
+    (local_cxy      == DEBUG_RWLOCK_CXY ) )
trunk/kernel/libk/user_dir.c
(r619 → r629)

- * Authors Alain Greiner (2016,2017,2018)
+ * Authors Alain Greiner (2016,2017,2018,2019)
...
 pid_t    ref_pid;            // reference process PID
 xptr_t   gpt_xp;             // extended pointer on reference process GPT
-uint32_t gpt_attributes;     // attributes for all mapped gpt entries
+uint32_t attr;               // attributes for all GPT entries
 uint32_t dirents_per_page;   // number of dirent descriptors per page
 xptr_t   page_xp;            // extended pointer on page descriptor
...
 uint32_t total_dirents;      // total number of dirents in dirent array
 uint32_t total_pages;        // total number of pages for dirent array
-vpn_t    vpn;                // first page in dirent array vseg
+vpn_t    vpn_base;           // first page in dirent array vseg
+vpn_t    vpn;                // current page in dirent array vseg
 ppn_t    ppn;                // ppn of currently allocated physical page
...
 uint32_t page_id;            // page index in list of physical pages
 kmem_req_t req;              // kmem request descriptor
+ppn_t    fake_ppn;           // unused, but required by hal_gpt_lock_pte()
+uint32_t fake_attr;          // unused, but required by hal_gpt_lock_pte()
 error_t  error;
 
-// get cluster, local pointer, and pid of reference user process
+// get cluster, local pointer, and pid of reference process
 ref_cxy = GET_CXY( ref_xp );
 ref_ptr = GET_PTR( ref_xp );
...
 "unconsistent vseg size for dirent array" );
 
-// build extended pointer on reference process GPT, PTE attributes and ppn
+// build extended pointer on reference process GPT
 gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
-gpt_attributes = GPT_MAPPED |
-                 GPT_SMALL |
-                 GPT_READABLE |
-                 GPT_CACHABLE |
-                 GPT_USER ;
+
+// build PTE attributes
+attr = GPT_MAPPED |
+       GPT_SMALL |
+       GPT_READABLE |
+       GPT_CACHABLE |
+       GPT_USER ;
 
 // get first vpn from vseg descriptor
-vpn = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
+vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );
 
 // scan the list of allocated physical pages to map
-// all physical pages in the in the reference process GPT
+// all physical pages in the reference process GPT
 page_id = 0;
 while( list_is_empty( &root ) == false )
 {
...
 // compute ppn
 ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
+
+// compute vpn
+vpn = vpn_base + page_id;
 
-error = hal_gpt_set_pte( gpt_xp,
-                         vpn + page_id,
-                         gpt_attributes,
-                         ppn );
+// lock the PTE (and create PT2 if required)
+error = hal_gpt_lock_pte( gpt_xp,
+                          vpn,
+                          &fake_attr,
+                          &fake_ppn );
 if( error )
 {
 printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
-__FUNCTION__, (vpn + page_id) );
+__FUNCTION__, vpn );
 
 // delete the vseg
-if( ref_cxy == local_cxy) vmm_delete_vseg( ref_pid, vpn<<CONFIG_PPM_PAGE_SHIFT );
-else rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn<<CONFIG_PPM_PAGE_SHIFT );
+if( ref_cxy == local_cxy)
+    vmm_delete_vseg( ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
+else
+    rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
 
 // release the user_dir descriptor
...
 }
 
+// set PTE in GPT
+hal_gpt_set_pte( gpt_xp,
+                 vpn,
+                 attr,
+                 ppn );
+
 #if DEBUG_USER_DIR
 if( cycle > DEBUG_USER_DIR )
...
 dir->current = 0;
 dir->entries = total_dirents;
-dir->ident   = (intptr_t)(vpn << CONFIG_PPM_PAGE_SHIFT);
+dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_SHIFT);
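The new mapping sequence above is the per-PTE locking idiom this changeset introduces everywhere: lock the target PTE first (which also allocates the second-level page table if needed), then publish the mapping. A minimal sketch of the idiom, assuming, as the code above suggests, that hal_gpt_set_pte() writes the final attributes and thereby releases the per-PTE lock (error handling elided):

// hedged sketch of the lock-then-map idiom used above
ppn_t    fake_ppn;    // hal_gpt_lock_pte() returns the current PPN/attr, unused here
uint32_t fake_attr;

// 1. take exclusive ownership of the PTE; creates the PT2 if it does not exist yet
if( hal_gpt_lock_pte( gpt_xp , vpn , &fake_attr , &fake_ppn ) == 0 )
{
    // 2. write the final mapping; this makes the PTE visible to other threads
    hal_gpt_set_pte( gpt_xp , vpn , attr , ppn );
}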
trunk/kernel/libk/user_dir.h
(r623 → r629)

- * Authors Alain Greiner (2016,2017,2018)
+ * Authors Alain Greiner (2016,2017,2018,2019)
trunk/kernel/mm/vmm.c
(r625 → r629)

In vmm_fork_copy(), the child GPT lock is no longer initialized:

 child_vmm = &child_process->vmm;
 
-// initialize the locks protecting the child VSL and GPT
-remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
+// initialize the lock protecting the child VSL
 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );

In vmm_destroy(), the GPT is released without taking a global lock:

 xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
 xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
-xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
 
 // take the VSL lock
...
-// take the GPT lock
-remote_rwlock_wr_acquire( gpt_lock_xp );
-
 // release memory allocated to the GPT itself
 hal_gpt_destroy( &vmm->gpt );
-
-// release the GPT lock
-remote_rwlock_wr_release( gpt_lock_xp );

In vmm_remove_vseg(), an extended pointer on the GPT is built once and reused:

 vmm_t * vmm;              // local pointer on process VMM
+xptr_t   gpt_xp;          // extended pointer on GPT
 bool_t   is_ref;          // local process is reference process
 uint32_t vseg_type;       // vseg type
...
 vmm = &process->vmm;
+
+// build extended pointer on GPT
+gpt_xp = XPTR( local_cxy , &vmm->gpt );
...
 // get ppn and attr
-hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
+hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
 
 if( attr & GPT_MAPPED )   // PTE is mapped
...
 // unmap GPT entry in local GPT
-hal_gpt_reset_pte( &vmm->gpt , vpn );
+hal_gpt_reset_pte( gpt_xp , vpn );

vmm_handle_page_fault() is rewritten around per-PTE locking. The old version took the whole local GPT remote_rwlock in write mode for private (STACK, CODE) vsegs, and the reference GPT remote_rwlock (read mode for the lookup, write mode for a true fault) for public vsegs, re-checking after each lock acquisition whether a concurrent page fault had already mapped the VPN, and checking every hal_gpt_set_pte() return code. The declarations change accordingly:

 vseg_t * vseg;            // vseg containing vpn
-uint32_t new_attr;        // new PTE_ATTR value
-ppn_t    new_ppn;         // new PTE_PPN value
+uint32_t attr;            // PTE_ATTR value
+ppn_t    ppn;             // PTE_PPN value
 uint32_t ref_attr;        // PTE_ATTR value in reference GPT
 ppn_t    ref_ppn;         // PTE_PPN value in reference GPT
...
 xptr_t   local_gpt_xp;    // extended pointer on local GPT
-xptr_t   local_lock_xp;   // extended pointer on local GPT lock
 xptr_t   ref_gpt_xp;      // extended pointer on reference GPT
-xptr_t   ref_lock_xp;     // extended pointer on reference GPT lock
 error_t  error;           // value returned by called functions
+
+thread_t * this = CURRENT_THREAD;
+
+#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
+uint32_t start_cycle = (uint32_t)hal_get_cycles();
+#endif

The new body first locks the single target PTE in the local GPT, then handles the three cases (local, false, global), each measured by the new instrumentation counters:

+// build extended pointer on local GPT
+local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
+
+// lock target PTE in local GPT and get current PPN and attributes
+error = hal_gpt_lock_pte( local_gpt_xp , vpn , &attr , &ppn );
+if( error )
+{
+    printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n",
+    __FUNCTION__ , vpn , process->pid );
+    return EXCP_KERNEL_PANIC;
+}
+
+// handle page fault only if PTE still unmapped after lock
+if( (attr & GPT_MAPPED) == 0 )
+{
+    // get reference process cluster and local pointer
+    ref_cxy = GET_CXY( process->ref_xp );
+    ref_ptr = GET_PTR( process->ref_xp );
+
+    // private vseg or (local == reference) => access only the local GPT
+    if( (vseg->type == VSEG_TYPE_STACK) ||
+        (vseg->type == VSEG_TYPE_CODE)  ||
+        (ref_cxy == local_cxy) )
+    {
+        // allocate and initialise a physical page depending on the vseg type
+        error = vmm_get_one_ppn( vseg , vpn , &ppn );
+        if( error )
+        {
+            printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n",
+            __FUNCTION__ , process->pid , vpn );
+            hal_gpt_unlock_pte( local_gpt_xp , vpn );
+            return EXCP_KERNEL_PANIC;
+        }
+
+        // define attr from vseg flags
+        attr = GPT_MAPPED | GPT_SMALL;
+        if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
+        if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
+        if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
+        if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
+
+        // set PTE to local GPT
+        hal_gpt_set_pte( local_gpt_xp , vpn , attr , ppn );
+
+#if CONFIG_INSTRUMENTATION_PGFAULTS
+        this->info.local_pgfault_nr++;
+        this->info.local_pgfault_cost += (end_cycle - start_cycle);
+#endif
+        return EXCP_NON_FATAL;
+    }
+    // public vseg and (local != reference) => access ref GPT to update local GPT
+    else
+    {
+        // build extended pointer on reference GPT
+        ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
+
+        // get current PPN and attributes from reference GPT
+        hal_gpt_get_pte( ref_gpt_xp , vpn , &ref_attr , &ref_ppn );
+
+        if( ref_attr & GPT_MAPPED )   // false page fault => update local GPT
+        {
+            // update local GPT from reference GPT values
+            hal_gpt_set_pte( local_gpt_xp , vpn , ref_attr , ref_ppn );
+
+#if CONFIG_INSTRUMENTATION_PGFAULTS
+            this->info.false_pgfault_nr++;
+            this->info.false_pgfault_cost += (end_cycle - start_cycle);
+#endif
+            return EXCP_NON_FATAL;
+        }
+        else                          // true page fault => update both GPTs
+        {
+            // allocate and initialise a physical page (vmm_get_one_ppn), then lock
+            // the PTE in the reference GPT (hal_gpt_lock_pte); on either error,
+            // unlock the local PTE and return EXCP_KERNEL_PANIC
+
+            // define attr from vseg flags (as in the local case), then publish
+            // the new mapping in both GPTs
+            hal_gpt_set_pte( ref_gpt_xp   , vpn , attr , ppn );
+            hal_gpt_set_pte( local_gpt_xp , vpn , attr , ppn );
+
+#if CONFIG_INSTRUMENTATION_PGFAULTS
+            this->info.global_pgfault_nr++;
+            this->info.global_pgfault_cost += (end_cycle - start_cycle);
+#endif
+            return EXCP_NON_FATAL;
+        }
+    }
+}
+else   // page has been locally mapped by another concurrent thread
+{
+    // unlock PTE in local GPT
+    hal_gpt_unlock_pte( local_gpt_xp , vpn );
+
+    return EXCP_NON_FATAL;
+}

Each exit path samples `uint32_t end_cycle = (uint32_t)hal_get_cycles();` under `#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)`, and prints "local / false / global page fault handled / vpn %x / ppn %x / attr %x / cycle %d" when DEBUG_VMM_HANDLE_PAGE_FAULT is set.

vmm_handle_cow() follows the same scheme. The early hal_gpt_pte_is_cow() check and the global gpt_lock acquire/release are removed; the declarations gain gpt_ptr and gpt_cxy and lose gpt_lock_xp. An unregistered vpn is now a user error rather than a kernel panic:

 if( error )
 {
-printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n",
+printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
 __FUNCTION__, vpn, process->pid, this->trdid );
 
-return EXCP_KERNEL_PANIC;
+return EXCP_USER_ERROR;
 }

The relevant GPT (local for a private vseg, reference for a public one) is selected, and its target PTE is locked instead of taking a global lock:

-// take GPT lock in write mode
-remote_rwlock_wr_acquire( gpt_lock_xp );
-
-// get current PTE from reference GPT
-hal_gpt_get_pte( gpt_xp , vpn , &old_attr , &old_ppn );
+// lock target PTE in relevant GPT (local or reference)
+error = hal_gpt_lock_pte( gpt_xp , vpn , &old_attr , &old_ppn );
+if( error )
+{
+    printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
+    __FUNCTION__ , gpt_cxy, vpn , process->pid );
+    return EXCP_KERNEL_PANIC;
+}

The "unmapped PTE" kernel panic becomes a user error covering both conditions:

+// return user error if COW attribute not set or PTE2 unmapped
+if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
+{
+    hal_gpt_unlock_pte( gpt_xp , vpn );
+    return EXCP_USER_ERROR;
+}

The page-allocation error path, which previously re-acquired the lock it meant to release, now simply unlocks the PTE (hal_gpt_unlock_pte( gpt_xp , vpn )); the new attributes also reset the per-PTE lock bit:

-// build new_attr : reset COW and set WRITABLE,
-new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
+// build new_attr : set WRITABLE, reset COW, reset LOCKED
+new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));

and the final remote_rwlock_wr_release( gpt_lock_xp ) is removed. Several `#if (DEBUG_VMM_HANDLE_COW & 1)` guards become plain `#if DEBUG_VMM_HANDLE_COW`, and the vseg debug message now prints the vseg type (vseg_type_str(vseg->type)) instead of the vpn.
trunk/kernel/mm/vmm.h
(r625 → r629)

  * 3. The GPT in the reference cluster can be directly accessed by remote threads to handle
  *    false page-fault (page is mapped in the reference GPT, but the PTE copy is missing
- *    in the local GPT). It is also protected by a remote_rwlock.
+ *    in the local GPT). As each PTE can be protected by a specific GPT_LOCKED attribute
+ *    for exclusive access, it is NOT protected by a global lock.
...
 uint32_t        vsegs_nr;   /*! total number of local vsegs                   */
 
-remote_rwlock_t gpt_lock;   /*! lock protecting the local GPT                 */
 gpt_t           gpt;        /*! Generic Page Table (complete in reference)    */
...
In the vmm_fork_copy() description, "All DATA, ANON, REMOTE vsegs", "All CODE vsegs", and "All FILE vsegs" become "The DATA, ANON, REMOTE vsegs", "The CODE vsegs", and "The FILE vsegs".

- * This function is called by the process_make_fork() function executing the fork syscall.
- * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP,
- * and REMOTE vsegs of a process identified by the <process> argument.
+ * This function is called by the process_make_fork() function to update the COW attribute
+ * in the parent process vsegs. It set the COW flag, and reset the WRITABLE flag of
+ * all GPT entries of the DATA, MMAP, and REMOTE vsegs of the <process> argument.
...
- * This function modifies a GPT entry identified by the <process> and <vpn> arguments
+ * This function modifies one GPT entry identified by the <process> and <vpn> arguments
  * in all clusters containing a process copy.
- * It must be called by a thread running in the reference cluster.
- * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
- * using the list of copies stored in the owner process, and using remote_write accesses to
- * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
+ * It must be called by a thread running in the process owner cluster.
+ * It is used to maintain coherence in GPT copies, using the list of copies
+ * stored in the owner process, and uses remote_write accesses.
+ * It cannot fail, as only mapped PTE2 in GPT copies are updated.
...
In the vmm_handle_page_fault() description:

- * 3) if the missing VPN is public, and unmapped in the reference GPT, it's a true page fault.
+ * 3) if the missing VPN is public, and unmapped in the ref GPT, it is a true page fault.
...
- *    Other GPT copies will updated on demand.
- *    Concurrent accesses to the GPT are handled, thanks to the
- *    remote_rwlock protecting each GPT copy.
+ *    Other GPT copies will be updated on demand.
+ *    Concurrent accesses to the GPT(s) are handled by locking the target PTE before
+ *    accessing the local and/or reference GPT(s).

In the vmm_handle_cow() description:

- * 1) If the missing VPN belongs to a private vseg (STACK or CODE segment types, non
- *    replicated in all clusters), it access the local GPT to get the current PPN and ATTR.
+ * 1) If the missing VPN belongs to a private vseg (STACK), it access only the local GPT.
  *    It access the forks counter in the current physical page descriptor.
...
- * 2) If the missing VPN is public, it access the reference GPT to get the current PPN and
- *    ATTR. It access the forks counter in the current physical page descriptor.
+ * 2) If the missing VPN is public, it access only the reference GPT.
+ *    It access the forks counter in the current physical page descriptor.
...
- * In both cases, concurrent accesses to the GPT are protected by the remote_rwlock
- * atached to the GPT copy in VMM.
+ * In both cases, concurrent accesses to the GPT are handled by locking the target PTE
+ * before accessing the GPT.
trunk/kernel/syscalls/sys_barrier.c
(r626 → r629)

 }
 ////////
-default: {
+default:
+{
 assert ( false, "illegal operation type <%x>", operation );
 }