Changeset 610 for trunk/kernel/mm
Timestamp: Dec 27, 2018, 7:38:58 PM (6 years ago)
Location: trunk/kernel/mm
Files: 6 edited
trunk/kernel/mm/mapper.c
Diff r606 → r610:

  {
 
+     if( mapper_cxy == local_cxy )   // mapper is local
+     {
+ 
  #if (DEBUG_MAPPER_GET_PAGE & 1)
  if( DEBUG_MAPPER_GET_PAGE < cycle )
- printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ );
- #endif
-     if( mapper_cxy == local_cxy )   // mapper is local
-     {
+ printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
+ #endif
          error = mapper_handle_miss( mapper_ptr,
                                      page_id,
…
      else
      {
+ 
+ #if (DEBUG_MAPPER_GET_PAGE & 1)
+ if( DEBUG_MAPPER_GET_PAGE < cycle )
+ printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
+ #endif
          rpc_mapper_handle_miss_client( mapper_cxy,
                                         mapper_ptr,
…
      vfs_inode_t * inode = mapper->inode;
      vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
- if( DEBUG_MAPPER_HANDLE_MISS < cycle )
- printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
+ // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
+ // if( (page_id == 1) && (cycle > 10000000) )
+ printk("\n[%s] enter for page %d in <%s> / cycle %d",
  __FUNCTION__, page_id, name, cycle );
  if( DEBUG_MAPPER_HANDLE_MISS & 1 )
- grdxt_display( &mapper->rt , name );
+ grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
  #endif
 
- // allocate one page from the mapper cluster
+ // allocate one page from the local cluster
      req.type = KMEM_PAGE;
      req.size = 0;
…
  #if DEBUG_MAPPER_HANDLE_MISS
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_MAPPER_HANDLE_MISS < cycle )
- printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
+ // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
+ // if( (page_id == 1) && (cycle > 10000000) )
+ printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
  __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
  if( DEBUG_MAPPER_HANDLE_MISS & 1 )
- grdxt_display( &mapper->rt , name );
+ grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
  #endif
…
  }  // end mapper_release_page()
 
- ////////////////////////////////////////////
- error_t mapper_move_user( mapper_t * mapper,
+ ///////////////////////////////////////////////
+ error_t mapper_move_user( xptr_t   mapper_xp,
                            bool_t   to_buffer,
                            uint32_t file_offset,
…
                            uint32_t size )
  {
- xptr_t   mapper_xp;     // extended pointer on local mapper
  uint32_t page_offset;   // first byte to move to/from a mapper page
  uint32_t page_count;    // number of bytes to move to/from a mapper page
…
  #endif
 
- // build extended pointer on mapper
- mapper_xp = XPTR( local_cxy , mapper );
- 
  // compute offsets of first and last bytes in file
  uint32_t min_byte = file_offset;
…
  #if (DEBUG_MAPPER_MOVE_USER & 1)
  if( DEBUG_MAPPER_MOVE_USER < cycle )
- printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
+ printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, first, last );
  #endif
…
  #if (DEBUG_MAPPER_MOVE_USER & 1)
  if( DEBUG_MAPPER_MOVE_USER < cycle )
- printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",
- __FUNCTION__ , page_id , page_offset , page_count );
+ printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
  #endif
…
      if ( page_xp == XPTR_NULL ) return -1;
+ 
+ #if (DEBUG_MAPPER_MOVE_USER & 1)
+ if( DEBUG_MAPPER_MOVE_USER < cycle )
+ printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
+ __FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
+ #endif
 
      // compute pointer in mapper
…
  }
 
+ #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
+ if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
+ printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
+ __FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
+ #endif
+ 
      // move fragment
      hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );
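The hunks above also show the general dispatch idiom used throughout this changeset for mappers that may live in another cluster: test the mapper cluster, then either call the local miss handler directly or go through the RPC client stub. A simplified sketch of that idiom follows; the argument lists beyond what is visible in the hunks (notably the page_xp and error out-parameters) are assumptions, not quotes from the changeset.

    // Hedged sketch of the local / RPC dispatch shown above.
    // mapper_xp is an extended pointer on the (possibly remote) mapper.
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );   // cluster owning the mapper
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );   // local pointer valid in that cluster
    xptr_t     page_xp;                             // extended pointer on the missing page
    error_t    error;

    if( mapper_cxy == local_cxy )        // mapper is local : handle the miss directly
    {
        error = mapper_handle_miss( mapper_ptr, page_id, &page_xp );
    }
    else                                 // mapper is remote : delegate to the owner cluster
    {
        rpc_mapper_handle_miss_client( mapper_cxy, mapper_ptr, page_id, &page_xp, &error );
    }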
trunk/kernel/mm/mapper.h
Diff r606 → r610:

  *    - The leaves are pointers on physical page descriptors, dynamically allocated
  *      in the local cluster.
- *    - In a given cluster, a mapper is a "private" structure: a thread accessing the mapper
- *      must be running in the cluster containing it (can be a local thread or a RPC thread).
- *    - The mapper is protected by a blocking "rwlock", to support several simultaneous
- *      readers, and only one writer. This lock implement a busy waiting policy.
- *    - The mapper_get_page() function that return a page descriptor pointer from a page
- *      index in file is in charge of handling the miss on the mapper cache.
+ *    - The mapper is protected by a "remote_rwlock", to support several simultaneous
+ *      "readers", and only one "writer".
+ *    - A "reader" thread, calling the mapper_remote_get_page() function to get a page
+ *      descriptor pointer from the page index in file, can be remote (running in any cluster).
+ *    - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss,
+ *      must be local (running in the mapper cluster).
  *    - The vfs_mapper_move_page() function access the file system to handle a mapper miss,
  *      or update a dirty page on device.
- *    - The vfs_mapper_load_all() functions is used to load all pages of a given file
- *      or directory into the mapper.
+ *    - The vfs_mapper_load_all() functions is used to load all pages of a directory
+ *      into the mapper (prefetch).
  *    - the mapper_move_user() function is used to move data to or from an user buffer.
  *      This user space buffer can be physically distributed in several clusters.
…
  /*******************************************************************************************
- * This function move data between a local mapper, and a distributed user buffer.
- * It must be called by a thread running in cluster containing the mapper.
+ * This function move data between a remote mapper, identified by the <mapper_xp> argument,
+ * and a distributed user buffer. It can be called by a thread running in any cluster.
  * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
  * If required, the data transfer is split in "fragments", where one fragment contains
…
  * It uses "hal_uspace" accesses to move a fragment to/from the user buffer.
  * In case of write, the dirty bit is set for all pages written in the mapper.
- * The mapper being an extendable cache, it is automatically extended when required
- * for both read and write accesses.
+ * The mapper being an extendable cache, it is automatically extended when required.
  * The "offset" field in the file descriptor, and the "size" field in inode descriptor
  * are not modified by this function.
  *******************************************************************************************
- * @ mapper      : local pointer on mapper.
+ * @ mapper_xp   : extended pointer on mapper.
  * @ to_buffer   : mapper -> buffer if true / buffer -> mapper if false.
  * @ file_offset : first byte to move in file.
…
  * returns 0 if success / returns -1 if error.
  ******************************************************************************************/
- error_t mapper_move_user( mapper_t * mapper,
+ error_t mapper_move_user( xptr_t   mapper_xp,
                            bool_t   to_buffer,
                            uint32_t file_offset,
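Since mapper_move_user() now takes an extended pointer, a caller running in any cluster only has to wrap the mapper pointer with XPTR() before the call. A minimal usage sketch is given below; the user-buffer parameter is elided in the hunk above, so its exact name and position in the call are an assumption of this sketch, and the helper function itself is purely illustrative.

    // Hedged usage sketch of the new xptr_t based interface (not taken from the changeset).
    static error_t read_from_mapper( cxy_t      inode_cxy,   // cluster owning the mapper
                                     mapper_t * mapper,      // local pointer in that cluster
                                     uint32_t   offset,      // first byte to move in file
                                     void     * user_buf,    // user buffer (assumed argument)
                                     uint32_t   size )       // number of bytes to move
    {
        // build the extended pointer expected by mapper_move_user()
        xptr_t mapper_xp = XPTR( inode_cxy , mapper );

        // mapper -> user buffer (to_buffer == true)
        return mapper_move_user( mapper_xp, true, offset, user_buf, size );
    }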
trunk/kernel/mm/ppm.c
Diff r606 → r610:

      xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
 
+ // printk("\n@@@ %s : before dirty_list lock aquire\n", __FUNCTION__ );
+ 
      // lock the remote PPM dirty_list
      remote_queuelock_acquire( dirty_lock_xp );
 
+ // printk("\n@@@ %s : after dirty_list lock aquire\n", __FUNCTION__ );
+ 
      // lock the remote page
      remote_busylock_acquire( page_lock_xp );
+ 
+ // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
 
      // get remote page flags
…
  }
 
+ // printk("\n@@@ %s : before page lock release\n", __FUNCTION__ );
+ 
      // unlock the remote page
      remote_busylock_release( page_lock_xp );
 
+ // printk("\n@@@ %s : after page lock release\n", __FUNCTION__ );
+ 
      // unlock the remote PPM dirty_list
      remote_queuelock_release( dirty_lock_xp );
+ 
+ // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );
 
      return done;
trunk/kernel/mm/ppm.h
Diff r606 → r610:

  * also rooted in the PPM, in order to be able to save all dirty pages on disk.
  * This dirty list is protected by a specific remote_queuelock, because it can be
- * modified by a remote thread, but it is implemented as a local list, because it
- * contains only local pages.
+ * modified by a remote thread, but it contains only local pages.
  ****************************************************************************************/
…
  * It can be called by a thread running in any cluster.
  * - it takes the queuelock protecting the PPM dirty_list.
+ * - it takes the busylock protecting the page flags.
  * - it test the PG_DIRTY flag in the page descriptor.
  *   . if page already dirty => do nothing
  *   . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
+ * - it releases the busylock protecting the page flags.
  * - it releases the queuelock protecting the PPM dirty_list.
  *****************************************************************************************
…
  * It can be called by a thread running in any cluster.
  * - it takes the queuelock protecting the PPM dirty_list.
+ * - it takes the busylock protecting the page flags.
  * - it test the PG_DIRTY flag in the page descriptor.
  *   . if page not dirty => do nothing
  *   . it page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
+ * - it releases the busylock protecting the page flags.
  * - it releases the queuelock protecting the PPM dirty_list.
  *****************************************************************************************
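The two function headers above describe the same two-level locking protocol: the remote queuelock on the PPM dirty_list is taken first, the busylock on the page second, and both are released in reverse order. The sketch below condenses that protocol for the "do dirty" case; the flag accessor and the page descriptor field names are assumptions of this sketch, and the actual list insertion is omitted.

    // Hedged sketch of the lock ordering documented above.
    bool_t done = false;

    remote_queuelock_acquire( dirty_lock_xp );    // 1. lock the remote PPM dirty_list
    remote_busylock_acquire( page_lock_xp );      // 2. lock the remote page (flags)

    // read the remote page flags (accessor and field names assumed)
    uint32_t flags = hal_remote_l32( XPTR( page_cxy , &page_ptr->flags ) );

    if( (flags & PG_DIRTY) == 0 )                 // page not dirty yet
    {
        // set PG_DIRTY and register the page in the PPM dirty_list (omitted here)
        done = true;
    }

    remote_busylock_release( page_lock_xp );      // release in reverse order
    remote_queuelock_release( dirty_lock_xp );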
trunk/kernel/mm/vmm.c
Diff r606 → r610:

  #endif
 
- // compute target cluster
      page_t * page_ptr;
      cxy_t    page_cxy;
…
  #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
  if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
- printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" ,
+ printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n"
  "         %d bytes from mapper / %d bytes from BSS\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn,
…
                        (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
                        &vseg );
- 
  if( error )
  {
…
  #endif
 
+ // access local GPT to get GPT_COW flag
+ bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );
+ 
+ if( cow == false ) return EXCP_USER_ERROR;
+ 
  // get local vseg
  error = vmm_get_vseg( process,
                        (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
                        &vseg );
- 
  if( error )
  {
…
  ref_ptr = GET_PTR( process->ref_xp );
 
- // build relevant extended pointers on GPT and GPT lock
+ // build relevant extended pointers on relevant GPT and GPT lock
  // - access local GPT for a private vseg
  // - access reference GPT for a public vseg
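The last hunks above add an early guard to the copy-on-write handler: the local GPT is consulted first, and a write fault on a PTE that does not carry the COW flag is reported as a user error before any vseg lookup is attempted. The resulting entry control flow, restated as a short sketch (only the beginning of the handler, with the error path reduced to a comment):

    // Hedged restatement of the new entry check in the COW handler.
    // access the local GPT to get the GPT_COW flag for this vpn
    bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );

    // a write fault on a non-COW page is an illegal user access, not a COW event
    if( cow == false ) return EXCP_USER_ERROR;

    // otherwise find the vseg covering the faulting address
    error = vmm_get_vseg( process,
                          (intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT,
                          &vseg );
    if( error )
    {
        // vpn not found in a registered vseg => panic path (unchanged by this changeset)
    }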
trunk/kernel/mm/vmm.h
Diff r595 → r610:

      bool_t mapping );
 
- /*******************************************************************************************
+ /*********************************************************************************************
  * This function adds a vseg descriptor in the VSL of a given VMM,
  * and updates the vmm field in the vseg descriptor.
  * It takes the lock protecting VSL.
- *******************************************************************************************
+ *********************************************************************************************
  * @ vmm  : pointer on the VMM
  * @ vseg : pointer on the vseg descriptor
- ******************************************************************************************/
+ ********************************************************************************************/
  void vmm_vseg_attach( struct vmm_s * vmm,
                        vseg_t       * vseg );
 
- /*******************************************************************************************
+ /*********************************************************************************************
  * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
  * and updates the vmm field in the vseg descriptor. No memory is released.
  * It takes the lock protecting VSL.
- *******************************************************************************************
+ *********************************************************************************************
  * @ vmm  : pointer on the VMM
  * @ vseg : pointer on the vseg descriptor
- ******************************************************************************************/
+ ********************************************************************************************/
  void vmm_vseg_detach( struct vmm_s * vmm,
                        vseg_t       * vseg );
…
  * (d) if the removed region cut the vseg in three parts, it is modified, and a new
  *     vseg is created with same type.
- * FIXME [AG] this function must be called by a thread running in the reference cluster,
- * and the VMM must be updated in all process descriptors copies.
+ * FIXME [AG] this function should be called by a thread running in the reference cluster,
+ * and the VMM should be updated in all process descriptors copies.
  *********************************************************************************************
  * @ process : pointer on process descriptor
…
  /*********************************************************************************************
  * This function is called by the generic exception handler in case of page-fault event,
- * detected for a given <vpn> in a given <process> in any cluster.
+ * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
  * It checks the missing VPN and returns an user error if it is not in a registered vseg.
  * For a legal VPN, there is actually 3 cases:
…
  * on vseg type, and updates directly (without RPC) the local GPT and the reference GPT.
  * Other GPT copies will updated on demand.
- * In the three cases, concurrent accesses to the GPT are handled, thanks to the
+ * Concurrent accesses to the GPT are handled, thanks to the
  * remote_rwlock protecting each GPT copy.
  *********************************************************************************************
- * @ process : pointer on local process descriptor copy.
- * @ vpn     : VPN of the missing PTE.
+ * @ process : local pointer on local process.
+ * @ vpn     : VPN of the missing PTE.
  * @ returns EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC after analysis
  ********************************************************************************************/
…
  /*********************************************************************************************
- * This function is called by the generic exception handler in case of copy-on-write event,
- * detected for a given <vpn> in a given <process> in any cluster.
+ * This function is called by the generic exception handler in case of WRITE violation event,
+ * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
  * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.
  * For a legal mapped vseg there is two cases:
…
  * Finally it calls the vmm_global_update_pte() function to reset the COW flag and set
  * the WRITE flag in all the GPT copies, using a RPC if the reference cluster is remote.
- * In both cases, concurrent accesses to the GPT are handled, thanks to the
- * remote_rwlock protecting each GPT copy.
+ * In both cases, concurrent accesses to the GPT are protected by the remote_rwlock
+ * attached to the GPT copy in VMM.
  *********************************************************************************************
  * @ process : pointer on local process descriptor copy.