Changeset 313 for trunk/kernel
- Timestamp:
- Aug 2, 2017, 3:24:57 PM (7 years ago)
- Location:
- trunk/kernel
- Files:
-
- 14 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/kern/rpc.c
r296 r313 75 75 &rpc_kcm_free_server, // 23 76 76 &rpc_mapper_move_buffer_server, // 24 77 &rpc_ undefined,// 2577 &rpc_mapper_get_page_server, // 25 78 78 &rpc_undefined, // 26 79 79 &rpc_undefined, // 27 … … 96 96 void rpc_pmem_get_pages_client( cxy_t cxy, 97 97 uint32_t order, // in 98 error_t * error, // out 99 uint32_t * ppn ) // out 98 page_t ** page ) // out 100 99 { 101 100 rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); … … 114 113 rpc_send_sync( cxy , &rpc ); 115 114 116 // get output arguments RPC descriptor 117 *error = (error_t)rpc.args[0]; 118 *ppn = (uint32_t)rpc.args[1]; 115 // get output arguments from RPC descriptor 116 *page = (page_t *)(intptr_t)rpc.args[1]; 119 117 120 118 rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); … … 124 122 void rpc_pmem_get_pages_server( xptr_t xp ) 125 123 { 126 uint32_t order; // input127 error_t error; // output128 uint32_t ppn; // output129 130 124 rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 131 125 … … 135 129 136 130 // get input arguments from client RPC descriptor 137 order = hal_remote_lw( XPTR( cxy , &desc->args[0] ) );131 uint32_t order = hal_remote_lw( XPTR( cxy , &desc->args[0] ) ); 138 132 139 133 // call local pmem allocator 140 134 page_t * page = ppm_alloc_pages( order ); 141 error = ( page == NULL ) ? 
ENOMEM : 0;142 ppn = ppm_page2ppn( page );143 135 144 136 // set output arguments into client RPC descriptor 145 hal_remote_sw( XPTR( cxy , &desc->args[0] ) , error ); 146 hal_remote_sw( XPTR( cxy , &desc->args[1] ) , ppn ); 137 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 147 138 148 139 rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); … … 1353 1344 bool_t is_user, // in 1354 1345 uint32_t file_offset, // in 1355 void *buffer, // in1346 uint64_t buffer, // in 1356 1347 uint32_t size, // in 1357 1348 error_t * error ) // out … … 1371 1362 rpc.args[2] = (uint64_t)is_user; 1372 1363 rpc.args[3] = (uint64_t)file_offset; 1373 rpc.args[4] = (uint64_t) (intptr_t)buffer;1364 rpc.args[4] = (uint64_t)buffer; 1374 1365 rpc.args[5] = (uint64_t)size; 1375 1366 … … 1390 1381 bool_t is_user; 1391 1382 uint32_t file_offset; 1392 void * buffer; 1383 void * user_buffer; 1384 xptr_t kern_buffer; 1393 1385 uint32_t size; 1394 1386 error_t error; … … 1405 1397 is_user = hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) ); 1406 1398 file_offset = hal_remote_lwd( XPTR( client_cxy , &desc->args[3] ) ); 1407 buffer = (void *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) );1408 1399 size = hal_remote_lwd( XPTR( client_cxy , &desc->args[5] ) ); 1409 1400 1410 1401 // call local kernel function 1411 error = mapper_move_buffer( mapper, 1412 to_buffer, 1413 is_user, 1414 file_offset, 1415 buffer, 1416 size ); 1402 if( is_user ) 1403 { 1404 user_buffer = (void *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) ); 1405 1406 error = mapper_move_user_buffer( mapper, 1407 to_buffer, 1408 file_offset, 1409 user_buffer, 1410 size ); 1411 } 1412 else 1413 { 1414 kern_buffer = (xptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[4] ) ); 1415 1416 error = mapper_move_user_buffer( mapper, 1417 to_buffer, 1418 file_offset, 1419 kern_buffer, 1420 size ); 1421 } 1417 1422 1418 1423 // set output argument to 
client RPC descriptor … … 1421 1426 rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 1422 1427 } 1428 1429 ///////////////////////////////////////////////////////////////////////////////////////// 1430 // [25] Marshaling functions attached to RPC_MAPPER_GET_PAGE 1431 ///////////////////////////////////////////////////////////////////////////////////////// 1432 1433 /////////////////////////////////////////////////////// 1434 void rpc_mapper_get_page_client( cxy_t cxy, 1435 struct mapper_s * mapper, // in 1436 uint32_t index, // in 1437 page_t ** page ) // out 1438 { 1439 rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 1440 1441 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1442 1443 // initialise RPC descriptor header 1444 rpc_desc_t rpc; 1445 rpc.index = RPC_MAPPER_GET_PAGE; 1446 rpc.response = 1; 1447 1448 // set input arguments in RPC descriptor 1449 rpc.args[0] = (uint64_t)(intptr_t)mapper; 1450 rpc.args[1] = (uint64_t)index; 1451 1452 // register RPC request in remote RPC fifo (blocking function) 1453 rpc_send_sync( cxy , &rpc ); 1454 1455 // get output values from RPC descriptor 1456 *page = (page_t *)(intptr_t)rpc.args[2]; 1457 1458 rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 1459 } 1460 1461 //////////////////////////////////////////// 1462 void rpc_mapper_get_page_server( xptr_t xp ) 1463 { 1464 rpc_dmsg("\n[INFO] %s : enter at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 1465 1466 // get client cluster identifier and pointer on RPC descriptor 1467 cxy_t cxy = (cxy_t)GET_CXY( xp ); 1468 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 1469 1470 // get input arguments from client RPC descriptor 1471 mapper_t * mapper = (mapper_t *)(intptr_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) ); 1472 uint32_t index = (uint32_t) hal_remote_lwd( XPTR( cxy , &desc->args[1] ) ); 1473 1474 // call local pmem allocator 1475 
page_t * page = mapper_get_page( mapper , index ); 1476 1477 // set output arguments into client RPC descriptor 1478 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 1479 1480 rpc_dmsg("\n[INFO] %s : completed at cycle %d\n", __FUNCTION__ , hal_time_stamp() ); 1481 } 1482 1423 1483 1424 1484 /***************************************************************************************/ -
trunk/kernel/kern/rpc.h
r296 r313 35 35 36 36 struct process_s; 37 struct page_s; 37 38 struct vseg_s; 38 39 struct exec_info_s; … … 80 81 RPC_KCM_FREE = 23, 81 82 RPC_MAPPER_MOVE_BUFFER = 24, 83 RPC_MAPPER_GET_PAGE = 25, 82 84 83 85 RPC_MAX_INDEX = 30, … … 204 206 /*********************************************************************************** 205 207 * [0] The RPC_PMEM_GET_PAGES allocates one or several pages in a remote cluster, 206 * and returns the PPN of the first allocated page.208 * and returns the local pointer on the page descriptor. 207 209 *********************************************************************************** 208 210 * @ cxy : server cluster identifier 209 211 * @ order : [in] ln2( number of requested pages ) 210 * @ error : [out] error status (0 if success) 211 * @ ppn : [out] first physical page number 212 **********************************************************************************/ 213 void rpc_pmem_get_pages_client( cxy_t cxy, 214 uint32_t order, 215 error_t * error, 216 uint32_t * ppn ); 212 * @ page : [out] local pointer on page descriptor / NULL if failure 213 **********************************************************************************/ 214 void rpc_pmem_get_pages_client( cxy_t cxy, 215 uint32_t order, 216 struct page_s ** page ); 217 217 218 218 void rpc_pmem_get_pages_server( xptr_t xp ); … … 550 550 551 551 /*********************************************************************************** 552 * [24] The RPC_MAPPER_MOVE_USER is called by the vfs_move() function. 553 * It allows a client thread to requires a remote mapper to move data to/from 554 * an user buffer, as specified by the arguments. 552 * [24] The RPC_MAPPER_MOVE_BUFFER allows a client thread to require a remote 553 * mapper to move data to/from a kernel or user buffer. 554 * - It calls the mapper_move_user() function for a - possibly distributed - 555 * user buffer identified by a user-space pointer, and casted to uint64_t. 
556 * - It calls the mapper_move_kernel() function for a - possibly remote - 557 * kernel buffer identified by an extended pointer, and casted to uint64_t. 558 * It is used by the vfs_move_user() function to move data between a mapper 559 * and an user buffer required by a sys_read() or a sys_write(). 560 * It is used by the vmm_get_one_ppn() function to initialise a physical page 561 * from a .elf file mapper, for a CODE or DATA vseg page fault. 555 562 *********************************************************************************** 556 563 * @ cxy : server cluster identifier. 557 564 * @ mapper : [in] local pointer on mapper 558 * @ to_buffer : [in] move data from buffer to mapper if non zero.559 * @ is_user : [in] buffer in user space if non zero.565 * @ to_buffer : [in] move data from mapper to buffer if non zero. 566 * @ is_user : [in] buffer in user space if true 560 567 * @ file_offset : [in] first byte to move in mapper 561 * @ buffer : [in] pointer on buffer in user space568 * @ buffer : [in] user space pointer / kernel extended pointer 562 569 * @ size : [in] number of bytes to move 563 570 * @ error : [out] error status (0 if success). … … 568 575 bool_t is_user, 569 576 uint32_t file_offset, 570 void *buffer,577 uint64_t buffer, 571 578 uint32_t size, 572 579 error_t * error ); … … 574 581 void rpc_mapper_move_buffer_server( xptr_t xp ); 575 582 576 577 583 /*********************************************************************************** 584 * [25] The RPC_MAPPER_GET_PAGE allows a client thread to get the local pointer 585 * on a remote page descriptor, for a page, identified by the page index in mapper. 586 * It is used by the vmm_get_one_ppn() function to handle a page fault on 587 * a FILE type vseg. 588 *********************************************************************************** 589 * @ cxy : server cluster identifier. 590 * @ mapper : [in] local pointer on mapper. 591 * @ index : [in] page index in mapper. 
592 * @ page : [out] local pointer on page descriptor / NULL if failure. 593 **********************************************************************************/ 594 void rpc_mapper_get_page_client( cxy_t cxy, 595 struct mapper_s * mapper, 596 uint32_t index, 597 struct page_s ** page ); 598 599 void rpc_mapper_get_page_server( xptr_t xp ); 578 600 579 601 #endif -
trunk/kernel/libk/elf.c
r279 r313 130 130 // @ process : local pointer on process descriptor. 131 131 /////////////////////////////////////////////////////////////////////////////////////// 132 static error_t elf_segments_ load( xptr_t file_xp,133 void * segs_base,134 uint32_t nb_segs,135 process_t * process )132 static error_t elf_segments_register( xptr_t file_xp, 133 void * segs_base, 134 uint32_t nb_segs, 135 process_t * process ) 136 136 { 137 137 error_t error; 138 138 uint32_t index; 139 uint32_t file_size; 140 uint32_t mem_size; 141 intptr_t start; 139 intptr_t file_size; 140 intptr_t mem_size; 141 intptr_t file_offset; 142 intptr_t vbase; 142 143 uint32_t type; 143 144 uint32_t flags; 144 uint32_t offset;145 145 vseg_t * vseg; 146 146 … … 154 154 155 155 // get segment attributes 156 start = seg_ptr->p_vaddr; 157 offset = seg_ptr->p_offset; 158 file_size = seg_ptr->p_filesz; 159 mem_size = seg_ptr->p_memsz; 160 flags = seg_ptr->p_flags; 161 162 // check alignment 163 if( start & CONFIG_PPM_PAGE_MASK ) 164 { 165 printk("\n[WARNING] in %s : segment base not aligned = %x\n", 166 __FUNCTION__, start ); 167 } 168 169 // check size 170 if( file_size != mem_size ) 171 { 172 printk("\n[WARNING] in %s : base = %x / mem_size = %x / file_size = %x\n", 173 __FUNCTION__, start , mem_size , file_size); 174 } 175 176 // set seek on segment base in file 177 error = vfs_lseek( file_xp, 178 offset, 179 SEEK_SET, 180 NULL ); 181 182 if( error ) 183 { 184 printk("\n[ERROR] in %s : failed to seek\n", __FUNCTION__ ); 185 return -1; 186 } 156 vbase = seg_ptr->p_vaddr; // vseg base vaddr 157 mem_size = seg_ptr->p_memsz; // actual vseg size 158 file_offset = seg_ptr->p_offset; // vseg offset in .elf file 159 file_size = seg_ptr->p_filesz; // vseg size in .elf file 160 flags = seg_ptr->p_flags; 187 161 188 162 if( flags & PF_X ) // found CODE segment … … 205 179 // register vseg in VMM 206 180 vseg = (vseg_t *)vmm_create_vseg( process, 207 start,181 vbase, 208 182 mem_size, 209 183 type ); … … 211 185 { 
212 186 printk("\n[ERROR] in %s : cannot map segment / base = %x / size = %x\n", 213 __FUNCTION__ , start, mem_size );187 __FUNCTION__ , vbase , mem_size ); 214 188 return -1; 215 189 } 216 190 191 // get .elf file descriptor cluster and local pointer 192 cxy_t file_cxy = GET_CXY( file_xp ); 193 vfs_file_t * file_ptr = (vfs_file_t *)GET_PTR( file_xp ); 194 195 // initialize "file_mapper", "file_offset", "file_size" fields in vseg 196 vseg->file_mapper = hal_remote_lwd( XPTR( file_cxy , &file_ptr->mapper ); 197 vseg->file_offset = file_offset; 198 vseg->file_size = file_size; 199 200 // update reference counter in file descriptor 217 201 vfs_file_count_up( file_xp ); 218 202 } … … 220 204 return 0; 221 205 222 } // end elf_segments_ load()206 } // end elf_segments_register() 223 207 224 208 /////////////////////////////////////////////// … … 323 307 324 308 // register loadable segments in process VMM 325 error = elf_segments_ load( file_xp,326 segs_base,327 header.e_phnum,328 process );309 error = elf_segments_register( file_xp, 310 segs_base, 311 header.e_phnum, 312 process ); 329 313 if( error ) 330 314 { -
trunk/kernel/mm/mapper.c
r296 r313 299 299 } // end mapper_release_page() 300 300 301 //////////////////////////////////////////////// 302 error_t mapper_move_buffer( mapper_t * mapper, 303 bool_t to_buffer, 304 bool_t is_user, 305 uint32_t file_offset, 306 void * buffer, 307 uint32_t size ) 301 /////////////////////////////////////////////////// 302 error_t mapper_move_user( mapper_t * mapper, 303 bool_t to_buffer, 304 uint32_t file_offset, 305 void * buffer, 306 uint32_t size ) 308 307 { 309 308 uint32_t page_offset; // first byte to move to/from a mapper page … … 335 334 else page_offset = 0; 336 335 337 // compute page_count336 // compute number of bytes in page 338 337 if ( first == last ) page_count = size; 339 338 else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset; … … 359 358 360 359 // move fragment 361 if( to_buffer ) 362 { 363 if( is_user ) hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 364 else memcpy( buf_ptr , map_ptr , page_count ); 360 if( to_buffer ) 361 { 362 hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 365 363 } 366 364 else 367 365 { 368 366 page_do_dirty( page ); 369 if( is_user ) hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 370 else memcpy( map_ptr , buf_ptr , page_count ); 367 hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 371 368 } 372 369 … … 379 376 return 0; 380 377 381 } // end mapper_move_buffer() 382 378 } // end mapper_move_user() 379 380 //////////////////////////////////////////////// 381 error_t mapper_move_kernel( mapper_t * mapper, 382 bool_t to_buffer, 383 uint32_t file_offset, 384 xptr_t buffer_xp, 385 uint32_t size ) 386 { 387 uint32_t page_offset; // first byte to move to/from a mapper page 388 uint32_t page_count; // number of bytes to move to/from a mapper page 389 uint32_t index; // current mapper page index 390 uint32_t done; // number of moved bytes 391 page_t * page; // current mapper page descriptor 392 393 uint8_t * src_ptr; // source buffer local pointer 394 cxy_t src_cxy; // source 
cluster 395 uint8_t * dst_ptr; // destination buffer local pointer 396 cxy_t dst_cxy; // destination cluster 397 398 mapper_dmsg("\n[INFO] %s : enters / to_buf = %d / buffer = %l / size = %x / cycle %d\n", 399 __FUNCTION__ , to_buffer , buffer_xp , size , hal_time_stamp() ); 400 401 // compute offsets of first and last bytes in file 402 uint32_t min_byte = file_offset; 403 uint32_t max_byte = file_offset + size -1; 404 405 // compute indexes for first and last pages in mapper 406 uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT; 407 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT; 408 409 // get buffer cluster and local pointer 410 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 411 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp ); 412 413 // compute source and destination clusters 414 if( to_buffer ) 415 { 416 dst_cxy = buffer_cxy; 417 src_cxy = local_cxy; 418 } 419 else 420 { 421 src_cxy = buffer_cxy; 422 dst_cxy = local_cxy; 423 } 424 425 done = 0; 426 427 // loop on pages in mapper 428 for( index = first ; index <= last ; index++ ) 429 { 430 // compute page_offset 431 if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK; 432 else page_offset = 0; 433 434 // compute number of bytes to move in page 435 if ( first == last ) page_count = size; 436 else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset; 437 else if ( index == last ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1; 438 else page_count = CONFIG_PPM_PAGE_SIZE; 439 440 mapper_dmsg("\n[INFO] %s : page_index = %d / offset = %d / count = %d\n", 441 __FUNCTION__ , index , page_offset , page_count ); 442 443 // get page descriptor 444 page = mapper_get_page( mapper , index ); 445 446 if ( page == NULL ) return EINVAL; 447 448 // compute source and destination pointers 449 if( to_buffer ) 450 { 451 dst_ptr = buf_ptr + done; 452 src_ptr = (uint8_t *)ppm_page2vaddr( page ) + page_offset; 453 } 454 else 455 { 456 src_ptr = buf_ptr + done; 457 dst_ptr = (uint8_t 
*)ppm_page2vaddr( page ) + page_offset; 458 459 page_do_dirty( page ); 460 } 461 462 mapper_dmsg("\n[INFO] %s : index = %d / buf_ptr = %x / map_ptr = %x\n", 463 __FUNCTION__ , index , buf_ptr , map_ptr ); 464 465 // move fragment 466 hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count ); 467 468 done += page_count; 469 } 470 471 mapper_dmsg("\n[INFO] %s : exit for buffer %l / size = %x / cycle %d\n", 472 __FUNCTION__ , buffer_xp , size , hal_time_stamp() ); 473 474 return 0; 475 476 } // end mapper_move_kernel_buffer() 477 -
trunk/kernel/mm/mapper.h
r265 r313 57 57 * - the mapper_move_user() function is used to move data to or from an user buffer. 58 58 * This user space buffer can be physically distributed in several clusters. 59 * - the mapper_move_kernel() function is used to move data to or from a remote kernel 60 * buffer, that can be physically located in any cluster. 59 61 * - In the present implementation the cache size for a given file increases on demand, 60 62 * and the allocated memory is only released when the mapper/inode is destroyed. … … 117 119 118 120 /******************************************************************************************* 119 * This function move data between a mapper and a n user or kernelbuffer.121 * This function move data between a mapper and a - possibly distributed - user buffer. 120 122 * It must be called by a thread running in the cluster containing the mapper. 121 * - A kernel buffer must be entirely contained in the same cluster as the mapper. 122 * - An user buffer can be physically distributed in several clusters. 123 * In both cases, the data transfer is split in "fragments": one fragment contains 123 * It is called by the vfs_user_move() function to implement sys_read() and sys_write(). 124 * If required, the data transfer is split in "fragments", where one fragment contains 124 125 * contiguous bytes in the same mapper page. 125 * - It uses "hal_uspace" accesses to move a fragment to/from the user buffer. 126 * - It uses a simple memcpy" access to move a fragment to/from a kernel buffer. 126 * It uses "hal_uspace" accesses to move a fragment to/from the user buffer. 127 127 * In case of write, the dirty bit is set for all pages written in the mapper. 128 128 * The offset in the file descriptor is not modified by this function. … … 132 132 * @ is_user : user space buffer if true / kernel local buffer if false. 133 133 * @ file_offset : first byte to move in file. 
134 * @ buffer : pointer on buffer (local kernel buffer or user spaceaddress in user space.134 * @ buffer : user space pointer on user buffer. 135 135 * @ size : number of bytes to move. 136 136 * returns O if success / returns EINVAL if error. 137 137 ******************************************************************************************/ 138 error_t mapper_move_buffer( mapper_t * mapper, 138 error_t mapper_move_user( mapper_t * mapper, 139 bool_t to_buffer, 140 bool_t is_user, 141 uint32_t file_offset, 142 void * buffer, 143 uint32_t size ); 144 145 /******************************************************************************************* 146 * This function move data between a mapper and a remote kernel buffer. 147 * It must be called by a thread running in the cluster containing the mapper. 148 * If required, the data transfer is split in "fragments", where one fragment contains 149 * contiguous bytes in the same mapper page. 150 * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer. 151 * In case of write, the dirty bit is set for all pages written in the mapper. 152 * The offset in the file descriptor is not modified by this function. 153 ******************************************************************************************* 154 * @ mapper : local pointer on mapper. 155 * @ to_buffer : mapper -> buffer if true / buffer -> mapper if false. 156 * @ file_offset : first byte to move in file. 157 * @ buffer_xp : extended pointer on kernel buffer. 158 * @ size : number of bytes to move. 159 * returns O if success / returns EINVAL if error. 160 ******************************************************************************************/ 161 error_t mapper_move_kernel( mapper_t * mapper, 139 162 bool_t to_buffer, 140 bool_t is_user,141 163 uint32_t file_offset, 142 void * buffer,164 xptr_t buffer_xp, 143 165 uint32_t size ); 166 144 167 145 168 /******************************************************************************************* -
trunk/kernel/mm/ppm.c
r177 r313 61 61 } 62 62 63 //////////////////////////////////////////64 inline ppn_t ppm_page2ppn( page_t * page )65 {66 ppm_t * ppm = &LOCAL_CLUSTER->ppm;67 return (ppn_t)( page - ppm->pages_tbl );68 }69 70 /////////////////////////////////////////71 inline page_t * ppm_ppn2page( ppn_t ppn )72 {73 ppm_t * ppm = &LOCAL_CLUSTER->ppm;74 return &ppm->pages_tbl[ppn];75 }76 77 ///////////////////////////////////////78 inline void * ppm_ppn2vaddr( ppn_t ppn )79 {80 ppm_t * ppm = &LOCAL_CLUSTER->ppm;81 return ppm->vaddr_base + (ppn << CONFIG_PPM_PAGE_SHIFT);82 }83 84 //////////////////////////////////////////85 inline ppn_t ppm_vaddr2ppn( void * vaddr )86 {87 ppm_t * ppm = &LOCAL_CLUSTER->ppm;88 return ( (ppm->vaddr_base - vaddr) >> CONFIG_PPM_PAGE_SHIFT );89 }90 91 92 63 /////////////////////////////////////////// 93 64 void ppm_free_pages_nolock( page_t * page ) -
trunk/kernel/mm/ppm.h
r160 r313 123 123 124 124 /***************************************************************************************** 125 * Get the PPN from the page descriptor pointer.126 *****************************************************************************************127 * @ page : pointer to page descriptor128 * @ returns physical page number129 ****************************************************************************************/130 inline ppn_t ppm_page2ppn( page_t * page );131 132 /*****************************************************************************************133 * Get the page descriptor pointer from the PPN.134 *****************************************************************************************135 * @ ppn : physical page number136 * @ returns pointer on page descriptor137 ****************************************************************************************/138 inline page_t * ppm_ppn2page( ppn_t ppn );139 140 /*****************************************************************************************141 * Get the page virtual address from the PPN.142 *****************************************************************************************143 * @ ppn : physical page number144 * @ returns page virtual address.145 ****************************************************************************************/146 inline void* ppm_ppn2vaddr( ppn_t ppn );147 148 /*****************************************************************************************149 * Get the PPN from the page virtual address.150 *****************************************************************************************151 * @ vaddr : page virtual address152 * @ returns physical page number.153 ****************************************************************************************/154 inline ppn_t ppm_vaddr2ppn( void * base );155 156 /*****************************************************************************************157 125 * This function prints the PPM allocator status. 
158 126 ***************************************************************************************** -
trunk/kernel/mm/vmm.c
r286 r313 741 741 } 742 742 743 //////////////////////////////////////// 744 error_t vmm_get_one_ppn( vseg_t * vseg, 745 vpn_t vpn, 746 ppn_t * ppn ) 747 { 748 error_t error; 749 cxy_t page_cxy; // physical page cluster 750 page_t * page_ptr; // local pointer on physical page descriptor 751 752 uint32_t type = vseg->type; 753 xptr_t mapper_xp = vseg->mapper_xp; 754 755 // get mapper cluster and local pointer 756 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 757 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp ); 758 759 // FILE type : simply get the physical page from the file mapper 760 if( type == VSEG_TYPE_FILE ) 761 { 762 // compute index in file mapper 763 uint32_t index = vpn - vseg->vpn_base; 764 765 // get page descriptor from mapper 766 if( mapper_cxy == local_cxy ) // mapper is local 767 { 768 page_ptr = mapper_get_page( mapper_ptr , index ); 769 } 770 else // mapper is remote 771 { 772 rpc_mapper_get_page_client( mapper_cxy , mapper_ptr , index , &page_ptr ); 773 } 774 775 if ( page_ptr == NULL ) return EINVAL; 776 777 page_cxy = mapper_cxy; 778 } 779 780 // all other types : allocate a physical page from target cluster, 781 else 782 { 783 // get target cluster for physical page 784 if( flags & VSEG_DISTRIB ) // depends on VPN LSB 785 { 786 uint32_t x_width = LOCAL_CLUSTER->x_width; 787 uint32_t y_width = LOCAL_CLUSTER->y_width; 788 page_cxy = vpn & ((1<<(x_width + y_width)) - 1); 789 } 790 else // defined in vseg descriptor 791 { 792 page_cxy = vseg->cxy; 793 } 794 795 // allocate a physical page in target cluster 796 kmem_req_t req; 797 if( page_cxy == local_cxy ) // target cluster is the local cluster 798 { 799 req.type = KMEM_PAGE; 800 req.size = 0; 801 req.flags = AF_NONE; 802 page_ptr = (page_t *)kmem_alloc( &req ); 803 } 804 else // target cluster is not the local cluster 805 { 806 rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); 807 } 808 809 if( page_ptr == NULL ) return ENOMEM; 810 811 // initialise page from .elf file mapper for DATA 
and CODE types 812 if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) 813 { 814 // compute missing page index in vseg 815 vpn_t page_index = vpn - vseg->vpn_base; 816 817 // compute missing page offset in .elf file 818 intptr_t page_offset = vseg->file_offset + (page_index << PPM_PAGE_SHIFT); 819 820 // compute both local and extended pointer on page first byte 821 // WARNING : the pages_offset must have the same value in all clusters !!! 822 // to use this ppm_page2vaddr() function for a remote cluster 823 uint8_t * base_ptr = (uint8_t *)ppm_page2vaddr( page_ptr ); 824 xptr_t base_xp = XPTR( page_cxy , base_ptr ); 825 826 // file_size can be smaller than vseg_size for BSS 827 file_size = vseg->file_size; 828 829 if( file_size < page_offset ) // page fully in BSS 830 { 831 if( page_cxy == local_cxy ) 832 { 833 memset( base_ptr , 0 , PPM_PAGE_SIZE ); 834 } 835 else 836 { 837 hal_remote_memset( base_xp , 0 , PPM_PAGE_SIZE ); 838 } 839 } 840 else if( file size >= (page_offset + PPM_PAGE_SIZE) ) // page fully in mapper 841 842 if( mapper_cxy == local_cxy ) 843 { 844 error = mapper_move_kernel( mapper_ptr, 845 true, // to_buffer 846 page_offset, 847 base_xp, 848 PPM_PAGE_SIZE ); 849 } 850 else 851 { 852 rpc_mapper_move_buffer_client( mapper_cxy, 853 mapper_ptr, 854 true, // to buffer 855 false, // kernel buffer 856 page_offset, 857 (uint64_t)base_xp, 858 PPM_PAGE_SIZE, 859 &error ); 860 } 861 if( error ) return EINVAL; 862 } 863 else // in mapper : from page_offset -> (file_size - page_offset) 864 // in BSS : from file_size -> (page_offset + page_size) 865 { 866 // initialize mapper part 867 if( mapper_cxy == local_cxy ) // mapper is local 868 { 869 error = mapper_move_kernel( mapper_ptr, 870 true, // to_buffer 871 page_offset, 872 base_xp, 873 file_size - page_offset ); 874 } 875 else // mapper is remote 876 { 877 rpc_mapper_move_buffer_client( mapper_cxy, 878 mapper_ptr, 879 true, 880 false, // kernel buffer 881 page_offset, 882 (uint64_t)base_xp, 883 
file_size - page_offset, 884 &error ); 885 } 886 if( error ) return EINVAL; 887 888 // initialize BSS part 889 if( page_cxy == local_cxy ) 890 { 891 memset( base_ptr + file_size - page_offset , 0 , 892 page_offset + PPM_PAGE_SIZE - file_size ); 893 } 894 else 895 { 896 hal_remote_memset( base_xp + file_size - page_offset , 0 , 897 page_offset + PPM_PAGE_SIZE - file_size ); 898 } 899 } 900 } // end initialisation for CODE or DATA types 901 } 902 903 // return ppn 904 *ppn = hal_page2ppn( XPTR( page_cxy , page_ptr ) ); 905 return 0; 906 907 } // end vmm_get_one_ppn() 908 743 909 ///////////////////////////////////////// 744 910 error_t vmm_get_pte( process_t * process, … … 748 914 { 749 915 vseg_t * vseg; // pointer on vseg containing VPN 750 ppn_t ppn; // PPN from GPT entry916 ppn_t ppn; // physical page number 751 917 uint32_t attr; // attributes from GPT entry 752 918 error_t error; … … 762 928 hal_gpt_get_pte( &vmm->gpt , vpn , &attr , &ppn ); 763 929 764 // if PTE unmapped => allocate one small physical page to map it 930 // if PTE is unmapped 931 // 1) get VSEG containing the missing VPN 932 // 2) get & initialize physical page (depending on vseg type), 933 // 3) register the PTE in reference GPT 765 934 if( (attr & GPT_MAPPED) == 0 ) 766 935 { 767 // get vseg pointer936 // 1. 
get vseg pointer 768 937 vseg = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT ); 769 938 … … 775 944 } 776 945 777 // select the target cluster for physical mapping 778 uint32_t target_cxy; 779 if( vseg->flags & VSEG_DISTRIB ) // depends on VPN LSB 780 { 781 uint32_t x_width = LOCAL_CLUSTER->x_width; 782 uint32_t y_width = LOCAL_CLUSTER->y_width; 783 target_cxy = vpn & ((1<<(x_width + y_width)) - 1); 784 } 785 else // defined in vseg descriptor 786 { 787 target_cxy = vseg->cxy; 788 } 789 790 // allocate memory for page fault 791 kmem_req_t req; 792 page_t * page; 793 if( target_cxy == local_cxy ) // target cluster is the local cluster 794 { 795 req.type = KMEM_PAGE; 796 req.size = 0; 797 req.flags = AF_NONE; 798 page = (page_t *)kmem_alloc( &req ); 799 800 error = ( page == NULL ) ? 1 : 0; 801 ppn = ppm_page2ppn( page ); 802 } 803 else // target cluster is not the local cluster 804 { 805 rpc_pmem_get_pages_client( target_cxy , 0 , &error , &ppn ); 806 } 946 // 2. get physical page number, depending on vseg type 947 error = vmm_get_one_ppn( vseg , vpn , &ppn ); 807 948 808 949 if( error ) … … 810 951 printk("\n[ERROR] in %s : cannot allocate memory / process = %x / vpn = %x\n", 811 952 __FUNCTION__ , process->pid , vpn ); 812 return ENOMEM;953 return error; 813 954 } 814 955 815 // define GPT attributes from vseg flags956 // 3. 
define attributes from vseg flags and register in GPT 816 957 attr = GPT_MAPPED | GPT_SMALL; 817 958 if( vseg->flags & VSEG_USER ) attr |= GPT_USER; … … 820 961 if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE; 821 962 822 // set the missing PTE in local VMM823 963 error = hal_gpt_set_pte( &vmm->gpt , vpn , ppn , attr ); 964 824 965 if( error ) 825 966 { … … 828 969 return ENOMEM; 829 970 } 830 } 971 } // end new PTE 831 972 832 973 *ret_ppn = ppn; 833 974 *ret_attr = attr; 834 975 return 0; 835 } 976 977 } // end vmm_get_pte() 836 978 837 979 /////////////////////////////////////////////////// … … 851 993 process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp ); 852 994 853 // get missing PTE attributes and PPN 995 // get missing PTE attributes and PPN from reference cluster 854 996 if( local_cxy != ref_cxy ) // local cluster is not the reference cluster 855 997 { … … 879 1021 880 1022 return 0; 881 } 1023 1024 } // end vmm_handle_page_fault() 1025 882 1026 883 1027 /////////////////////////////////////////// … … 919 1063 920 1064 return error; 1065 921 1066 } 922 923 1067 /* 924 1068 -
trunk/kernel/mm/vmm.h
r68 r313 272 272 intptr_t vaddr ); 273 273 274 274 275 /********************************************************************************************* 275 276 * This function is called by the generic exception handler when a page fault … 291 292 /********************************************************************************************* 292 293 * This function returns in the "attr" and "ppn" arguments the PTE associated to a given 293 * VPN for a given process. This function must be called on the reference cluster.294 * To get the PTE from another cluster, use the RPC_VMM_GET_PTE.294 * VPN for a given process. This function must be called by a thread running in the 295 * reference cluster. To get the PTE from another cluster, use the RPC_VMM_GET_PTE. 295 296 * The vseg containing the searched VPN should be registered in the reference VMM. 296 297 * If the PTE in the reference page table is unmapped, this function allocates the missing 297 * physical page from the target cluster defined by the vseg type, and update the reference298 * page table. It can call a RPC_PMEM_GET_PAGES to get the missing physical page,299 * i f the target cluster is not the reference cluster.298 * physical page from the target cluster defined by the vseg type, initialize it, 299 * and update the reference page table. It calls the RPC_PMEM_GET_PAGES to get and 300 * initialize the missing physical page, if the target cluster is not the reference cluster. 300 301 ********************************************************************************************* 301 302 * @ process : [in] pointer on process descriptor. … 309 310 uint32_t * attr, 310 311 ppn_t * ppn ); 312 313 /********************************************************************************************* 314 * This function is called by the vmm_get_pte() function. 
315 * Depending on the vseg type, defined by the <vseg> argument, it returns the PPN 316 * (Physical Page Number) associated to a missing page defined by the <vpn> argument. 317 * - For the VSEG_TYPE_FILE, it returns the physical page from the file mapper. 318 * For all other types, it allocates a new physical page from the cluster defined 319 * by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg. 320 * - For the VSEG_TYPE_CODE and VSEG_TYPE_DATA types, the allocated page is initialized 321 * from the .elf file mapper. For other vseg types it is not initialized. 322 ********************************************************************************************* 323 * @ vseg : local pointer on vseg containing the missing page. 324 * @ vpn : Virtual Page Number identifying the missing page. 325 * @ ppn : [out] returned Physical Page Number. 326 ********************************************************************************************/ 327 error_t vmm_get_one_ppn( vseg_t * vseg, 328 vpn_t vpn, 329 ppn_t * ppn ); 311 330 312 331 /********************************************************************************************* -
trunk/kernel/mm/vseg.h
r101 r313 81 81 vpn_t vpn_size; /*! number of pages occupied */ 82 82 uint32_t flags; /*! vseg attributes */ 83 xptr_t mapper; /*! extended pointer on associated mapper*/84 fdid_t fdid; /*! associated fdid for a VSEG_TYPE_FILE*/85 uint32_t offset; /*! offset in file for a VSEG_TYPE_FILE*/86 cxy_t cxy; /*! target cluster for physical mapping */83 xptr_t file_mapper; /*! xptr on remote mapper (for types CODE / DATA / FILE) */ 84 intptr_t file_offset; /*! offset in file (for types CODE / DATA / FILE) */ 85 intptr_t file_size; /*! max segment size in mapper (for type DATA) */ 86 cxy_t cxy; /*! cluster for physical mapping (for non distributed) */ 87 87 } 88 88 vseg_t; -
trunk/kernel/syscalls/sys_read.c
r265 r313 41 41 { 42 42 error_t error; 43 paddr_t paddr; 44 char kbuf[CONFIG_VFS_KBUF_SIZE]; 45 43 paddr_t paddr; // required for user space checking 46 44 xptr_t file_xp; // remote file extended pointer 47 uint32_t nbytes; // number of bytes in one iteration48 45 49 46 thread_t * this = CURRENT_THREAD; … … 94 91 return -1; 95 92 } 96 97 // transfer at most CONFIG_VFS_KBUF_SIZE bytes per iteration 98 while( count ) 93 94 // transfer count bytes directly from mapper to user buffer 95 error = vfs_user_move( true, // to_buffer 96 file_xp , 97 buf, 98 count ); 99 100 if( error ) 99 101 { 100 if( count <= CONFIG_VFS_KBUF_SIZE ) 101 { 102 nbytes = count; 103 count = 0; 104 } 105 else 106 { 107 nbytes = CONFIG_VFS_KBUF_SIZE; 108 count = count - CONFIG_VFS_KBUF_SIZE; 109 } 110 111 // transfer nbytes to kernel buffer 112 error = vfs_move( true, // to_buffer 113 true, // is_user 114 file_xp , 115 kbuf , 116 nbytes ); 117 118 if( error ) 119 { 120 printk("\n[ERROR] in %s cannot read data from file %d\n", 121 __FUNCTION__ , file_id ); 122 this->errno = error; 123 return -1; 124 } 125 126 // copy kernel buffer to user space 127 hal_copy_to_uspace( buf , kbuf , nbytes ); 102 printk("\n[ERROR] in %s cannot read data from file %d\n", 103 __FUNCTION__ , file_id ); 104 this->errno = error; 105 return -1; 128 106 } 129 107 -
trunk/kernel/syscalls/sys_write.c
r265 r313 40 40 { 41 41 error_t error; 42 paddr_t paddr; 43 char kbuf[CONFIG_VFS_KBUF_SIZE]; 44 xptr_t file_xp; // remote file extended pointer 45 uint32_t nbytes; // number of bytes in one iteration 42 paddr_t paddr; // required for user space checking 43 xptr_t file_xp; // remote file extended pointer 46 44 47 45 thread_t * this = CURRENT_THREAD; … … 92 90 } 93 91 94 // transfer at most CONFIG_VFS_KBUF_SIZE bytes per iteration 95 while( count ) 92 // transfer count bytes directly from user buffer to mapper 93 error = vfs_user_move( false, // from buffer 94 file_xp, 95 kbuf , 96 nbytes ); 97 98 if( error ) 96 99 { 97 if( count <= CONFIG_VFS_KBUF_SIZE ) 98 { 99 nbytes = count; 100 count = 0; 101 } 102 else 103 { 104 nbytes = CONFIG_VFS_KBUF_SIZE; 105 count = count - CONFIG_VFS_KBUF_SIZE; 106 } 107 108 // copy user buffer to kernel buffer 109 hal_copy_to_uspace( buf , kbuf , nbytes ); 110 111 // transfer nbytes from kernel buffer 112 error = vfs_move( false, // from buffer 113 true, // is_user 114 file_xp, 115 kbuf , 116 nbytes ); 117 118 if( error ) 119 { 120 printk("\n[ERROR] in %s cannot read data from file %d\n", 121 __FUNCTION__ , file_id ); 122 this->errno = error; 123 return -1; 124 } 100 printk("\n[ERROR] in %s cannot read data from file %d\n", 101 __FUNCTION__ , file_id ); 102 this->errno = error; 103 return -1; 125 104 } 126 105 -
trunk/kernel/vfs/vfs.c
r296 r313 662 662 } // end vfs_open() 663 663 664 ///////////////////////////////////// 665 error_t vfs_move( bool_t to_buffer, 666 bool_t is_user, 667 xptr_t file_xp, 668 void * buffer, 669 uint32_t size ) 664 ////////////////////////////////////////// 665 error_t vfs_user_move( bool_t to_buffer, 666 xptr_t file_xp, 667 void * buffer, 668 uint32_t size ) 670 669 { 671 670 assert( ( file_xp != XPTR_NULL ) , __FUNCTION__ , "file_xp == XPTR_NULL" ); … … 695 694 if( file_cxy == local_cxy ) 696 695 { 697 error = mapper_move_buffer( mapper, 698 to_buffer, 699 is_user, 700 file_offset, 701 buffer, 702 size ); 696 error = mapper_move_user( mapper, 697 to_buffer, 698 file_offset, 699 buffer, 700 size ); 703 701 } 704 702 else 705 703 { 706 rpc_mapper_move_ buffer_client( file_cxy,707 708 709 is_user,710 711 712 713 704 rpc_mapper_move_user_client( file_cxy, 705 mapper, 706 to_buffer, 707 true, // user buffer 708 file_offset, 709 (uint64_t)(intptr_t)buffer, 710 size, 711 &error ); 714 712 } 715 713 … … 722 720 return -1; 723 721 } 724 } // end vfs_ move()722 } // end vfs_user_move() 725 723 726 724 ////////////////////////////////////// -
trunk/kernel/vfs/vfs.h
r296 r313 732 732 733 733 /****************************************************************************************** 734 * This function moves <size> bytes between the file identified by the open file descriptor735 * <file_xp> a nd a local kernel or user <buffer>, as defined by the <is_user> argument,736 * a nd taken into account the offset in <file_xp>.737 * Th e transfer direction is defined by the <to_buffer> argument.734 * This function moves <size> bytes between a remote file mapper, identified by the 735 * <file_xp> argument, and a - possibly distributed - user space <buffer>, taking into 736 * account the offset in <file_xp>. The transfer direction is defined by <to_buffer>. 737 * This function is called by the sys_read() and sys_write() system calls. 738 738 ****************************************************************************************** 739 739 * @ to_buffer : mapper -> buffer if true / buffer -> mapper if false. 740 * @ is_user : user space buffer if true / local kernel buffer if false.741 740 * @ file_xp : extended pointer on the remote file descriptor. 742 * @ buffer : local pointer on buffer.741 * @ buffer : user space pointer on buffer (can be physically distributed). 743 742 * @ size : requested number of bytes from offset. 744 743 * @ returns number of bytes actually transferred / -1 if error. 745 744 *****************************************************************************************/ 746 error_t vfs_user_move( bool_t to_buffer, 747 xptr_t file_xp, 748 void * buffer, 749 uint32_t size ); 751 750 752 751 /******************************************************************************************
Note: See TracChangeset
for help on using the changeset viewer.