Changeset 614
- Timestamp: Jan 15, 2019, 1:59:32 PM
- Location: trunk/kernel
- Files: 23 edited
trunk/kernel/devices/dev_ioc.c
r605 r614 91 91 // This static function is called by dev_ioc_read() & dev_ioc_write() functions. 92 92 // It builds and registers the command in the calling thread descriptor. 93 // Then, it registers the calling thead in IOC chdev waiting queue.93 // Then, it registers the calling thead in IOC chdev waiting queue. 94 94 // Finally it blocks on the THREAD_BLOCKED_IO condition and deschedule. 95 95 ////////////////////////////////////i///////////////////////////////////////////// … … 108 108 if( chdev_dir.iob ) 109 109 { 110 if ( cmd_type == IOC_READ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 );111 else 110 if (cmd_type == IOC_READ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 ); 111 else dev_mmc_sync ( XPTR( local_cxy , buffer ) , count<<9 ); 112 112 } 113 113 … … 162 162 163 163 return dev_ioc_access( IOC_READ , buffer , lba , count ); 164 165 #if DEBUG_DEV_IOC_RX166 cycle = (uint32_t)hal_get_cycles();167 if( DEBUG_DEV_IOC_RX < cycle )168 printk("\n[%s] thread[%x,%x] exit / lba %x / buffer %x / cycle %d\n",169 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );170 #endif171 172 164 } 173 165 … … 187 179 188 180 return dev_ioc_access( IOC_WRITE , buffer , lba , count ); 189 190 #if DEBUG_DEV_IOC_TX191 cycle = (uint32_t)hal_get_cycles();192 if( DEBUG_DEV_IOC_TX < cycle )193 printk("\n[%s] thread[%x,%x] exit / lba %x / buffer %x / cycle %d\n",194 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle );195 #endif196 197 181 } 182 183 184 185 186 187 ////////////////////////////////////////////////////////////////////////////////// 188 // This static function is called by dev_ioc_sync_read() & dev_ioc_sync_write(). 189 // It builds and registers the command in the calling thread descriptor, and 190 // calls directly the blocking IOC driver command, that returns only when the 191 // IO operation is completed. 
192 ////////////////////////////////////i///////////////////////////////////////////// 193 error_t dev_ioc_sync_access( uint32_t cmd_type, 194 uint8_t * buffer, 195 uint32_t lba, 196 uint32_t count ) 197 { 198 // get pointer on calling thread 199 thread_t * this = CURRENT_THREAD; 200 201 // software L2/L3 cache coherence for memory buffer 202 if( chdev_dir.iob ) 203 { 204 if (cmd_type == IOC_SYNC_READ) dev_mmc_inval( XPTR(local_cxy,buffer) , count<<9 ); 205 else dev_mmc_sync ( XPTR(local_cxy,buffer) , count<<9 ); 206 } 207 208 // get extended pointer on IOC[0] chdev 209 xptr_t ioc_xp = chdev_dir.ioc[0]; 210 211 // check ioc_xp 212 assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" ); 213 214 // register command in calling thread descriptor 215 this->ioc_cmd.dev_xp = ioc_xp; 216 this->ioc_cmd.type = cmd_type; 217 this->ioc_cmd.buf_xp = XPTR( local_cxy , buffer ); 218 this->ioc_cmd.lba = lba; 219 this->ioc_cmd.count = count; 220 221 // get driver command function 222 cxy_t ioc_cxy = GET_CXY( ioc_xp ); 223 chdev_t * ioc_ptr = GET_PTR( ioc_xp ); 224 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) ); 225 226 // get core local index for the core handling the IOC IRQ 227 thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) ); 228 core_t * core = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) ); 229 lid_t lid = (lid_t)hal_remote_l32( XPTR( ioc_cxy , &core->lid ) ); 230 231 // mask the IRQ 232 dev_pic_disable_irq( lid , ioc_xp ); 233 234 // call driver function 235 cmd( XPTR( local_cxy , this ) ); 236 237 // unmask the IRQ 238 dev_pic_enable_irq( lid , ioc_xp ); 239 240 // return I/O operation status from calling thread descriptor 241 return this->ioc_cmd.error; 242 243 } // end ioc_sync_access() 198 244 199 245 ///////////////////////////////////////////// … … 202 248 uint32_t count ) 203 249 { 204 // get pointer on calling thread205 thread_t * this = CURRENT_THREAD;206 250 207 251 #if DEBUG_DEV_IOC_RX 208 uint32_t cycle = (uint32_t)hal_get_cycles(); 252 thread_t * this = CURRENT_THREAD; 253 uint32_t cycle = (uint32_t)hal_get_cycles(); 254 if( DEBUG_DEV_IOC_RX < cycle ) 255 printk("\n[%s] thread[%x,%x] : lba %x / buffer %x / cycle %d\n", 256 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle ); 257 #endif 258 259 return dev_ioc_sync_access( IOC_SYNC_READ , buffer , lba , count ); 260 } 261 262 ////////////////////////////////////////////// 263 error_t dev_ioc_sync_write( uint8_t * buffer, 264 uint32_t lba, 265 uint32_t count ) 266 { 267 268 #if DEBUG_DEV_IOC_RX 269 thread_t * this = CURRENT_THREAD; 270 uint32_t cycle = (uint32_t)hal_get_cycles(); 209 271 if( DEBUG_DEV_IOC_RX < cycle ) 210 272 printk("\n[%s] thread[%x,%x] enters / lba %x / buffer %x / cycle %d\n", … … 212 274 #endif 213 275 214 // software L2/L3 cache coherence for memory buffer 215 if( chdev_dir.iob ) dev_mmc_inval( XPTR( local_cxy , buffer ) , count<<9 ); 216 217 // get extended pointer on IOC[0] chdev 218 xptr_t ioc_xp = chdev_dir.ioc[0]; 219 220 assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" ); 221 222 // register command in calling thread descriptor 223 this->ioc_cmd.dev_xp = ioc_xp; 224 this->ioc_cmd.type = IOC_SYNC_READ; 225 this->ioc_cmd.buf_xp = XPTR( local_cxy , buffer ); 226 this->ioc_cmd.lba = lba; 227 this->ioc_cmd.count = count; 228 229 // get driver command function 230 cxy_t ioc_cxy = GET_CXY( ioc_xp ); 231 chdev_t * ioc_ptr = (chdev_t *)GET_PTR( ioc_xp ); 232 dev_cmd_t * cmd = (dev_cmd_t 
*)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) ); 233 234 // get core local index for the core handling the IOC IRQ 235 thread_t * server = (thread_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->server ) ); 236 core_t * core = (core_t *)hal_remote_lpt( XPTR( ioc_cxy , &server->core ) ); 237 lid_t lid = (lid_t)hal_remote_l32( XPTR( ioc_cxy , &core->lid ) ); 238 239 // mask the IRQ 240 dev_pic_disable_irq( lid , ioc_xp ); 241 242 // call driver function 243 cmd( XPTR( local_cxy , this ) ); 244 245 // unmask the IRQ 246 dev_pic_enable_irq( lid , ioc_xp ); 247 248 #if DEBUG_DEV_IOC_RX 249 cycle = (uint32_t)hal_get_cycles(); 250 if( DEBUG_DEV_IOC_RX < cycle ) 251 printk("\n[%s] thread[%x,%x] exit / lba %x / buffer %x / cycle %d\n", 252 __FUNCTION__ , this->process->pid, this->trdid, lba, buffer, cycle ); 253 #endif 254 255 // return I/O operation status from calling thread descriptor 256 return this->ioc_cmd.error; 257 258 } // end ioc_sync_read() 259 276 return dev_ioc_sync_access( IOC_SYNC_WRITE , buffer , lba , count ); 277 } 278 -
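The coherence length used throughout this file follows from the 512-byte IOC block size: the byte count handed to dev_mmc_inval() / dev_mmc_sync() is always count << 9. A minimal stand-alone check of that arithmetic (the main() harness is illustrative, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define IOC_BLOCK_SHIFT 9    /* IOC blocks are 512 bytes, hence "count << 9" */

    /* byte length that the L2/L3 coherence functions must cover */
    static uint32_t ioc_bytes( uint32_t count )
    {
        return count << IOC_BLOCK_SHIFT;
    }

    int main( void )
    {
        printf( "8 blocks = %u bytes (one 4K page)\n", ioc_bytes( 8 ) );
        return 0;
    }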
trunk/kernel/devices/dev_ioc.h
r457 r614 38 38 * magnetic hard disk or a SD card, that can store blocks of data in a linear array 39 39 * of sectors indexed by a simple lba (logic block address). 40 * It supports three command types: 41 * - READ : move blocks from device to memory, with a descheduling policy. 42 * - WRITE : move blocks from memory to device, with a descheduling policy. 43 * - SYNC_READ : move blocks from device to memory, with a busy waiting policy. 40 * It supports four command types: 41 * - READ : move blocks from device to memory, with a descheduling policy. 42 * - WRITE : move blocks from memory to device, with a descheduling policy. 43 * - SYNC_READ : move blocks from device to memory, with a busy waiting policy. 44 * - SYNC_WRITE : move blocks from memory to device, with a busy waiting policy. 44 45 45 46 * A READ or WRITE operation requires dynamic ressource allocation. The calling thread … … 64 65 * 3) release the WTI mailbox to the client cluster WTI allocator. 65 66 * 66 * The SYNC_READ operation is used by the kernel in the initialisation phase. It does67 * not uses the IOC device waiting queue and server thread, and does not use the IOC IRQ,68 * but implement a busy-waiting policy for the calling thread.67 * The SYNC_READ and SYNC_WRITE operations are used by the kernel in the initialisation 68 * phase. These operations do not not use the IOC device waiting queue, the server thread, 69 * and the IOC IRQ, but implement a busy-waiting policy for the calling thread. 69 70 *****************************************************************************************/ 70 71 … … 85 86 *****************************************************************************************/ 86 87 87 enum ioc_impl_e 88 typedef enum 88 89 { 89 90 IMPL_IOC_BDV = 0, … … 99 100 *****************************************************************************************/ 100 101 101 enum102 typedef enum 102 103 { 103 104 IOC_READ = 0, 104 105 IOC_WRITE = 1, 105 106 IOC_SYNC_READ = 2, 106 }; 107 IOC_SYNC_WRITE = 3, 108 } 109 cmd_type_t; 107 110 108 111 typedef struct ioc_command_s … … 131 134 132 135 /****************************************************************************************** 133 * This blocking function try to tranferone or several contiguous blocks of data136 * This blocking function moves one or several contiguous blocks of data 134 137 * from the block device to a local memory buffer. The corresponding request is actually 135 138 * registered in the device pending request queue, and the calling thread is descheduled, … … 147 150 148 151 /****************************************************************************************** 149 * This blocking function try to tranferone or several contiguous blocks of data152 * This blocking function moves one or several contiguous blocks of data 150 153 * from a local memory buffer to the block device. The corresponding request is actually 151 154 * registered in the device pending request queue, and the calling thread is descheduled, … … 163 166 164 167 /****************************************************************************************** 165 * This blocking function try to tranferone or several contiguous blocks of data166 * from the block device to a memory buffer.168 * This blocking function moves one or several contiguous blocks of data 169 * from the block device to a local memory buffer. 
167 170 * It does not uses the IOC device waiting queue and server thread, and does not use 168 171 * the IOC IRQ, but call directly the relevant IOC driver, implementing a busy-waiting … … 179 182 uint32_t count ); 180 183 184 /****************************************************************************************** 185 * This blocking function moves one or several contiguous blocks of data 186 * from a local memory buffer to the block device. 187 * It does not uses the IOC device waiting queue and server thread, and does not use 188 * the IOC IRQ, but call directly the relevant IOC driver, implementing a busy-waiting 189 * policy for the calling thread. 190 * It must be called in the client cluster. 191 ****************************************************************************************** 192 * @ buffer : local pointer on source buffer in memory (must be block aligned). 193 * @ lba : first block index on device. 194 * @ count : number of blocks to transfer. 195 * @ returns 0 if success / returns EINVAL if error. 196 *****************************************************************************************/ 197 error_t dev_ioc_sync_write( uint8_t * buffer, 198 uint32_t lba, 199 uint32_t count ); 200 181 201 #endif /* _DEV_IOC_H */ -
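The four command types let a caller select busy-waiting only when descheduling is impossible, typically before the IOC server thread and IRQ routing exist. A hedged sketch of that choice, using the prototypes above; the booted flag is hypothetical and stands for whatever condition the kernel actually tests:

    #include <stdint.h>
    #include <stdbool.h>

    typedef int error_t;

    /* prototypes from dev_ioc.h (sketch is compilable but not linkable) */
    error_t dev_ioc_read      ( uint8_t * buffer, uint32_t lba, uint32_t count );
    error_t dev_ioc_sync_read ( uint8_t * buffer, uint32_t lba, uint32_t count );

    static error_t load_blocks( bool booted, uint8_t * buf, uint32_t lba, uint32_t n )
    {
        if( booted ) return dev_ioc_read     ( buf , lba , n );   /* descheduling  */
        else         return dev_ioc_sync_read( buf , lba , n );   /* busy-waiting  */
    }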
trunk/kernel/fs/devfs.c
r612 r614 110 110 assert( (error == 0) , "cannot create <dev>\n" ); 111 111 112 #if DEBUG_DEVFS_ INIT112 #if DEBUG_DEVFS_GLOBAL_INIT 113 113 uint32_t cycle = (uint32_t)hal_get_cycles(); 114 114 thread_t * this = CURRENT_THREAD; 115 if( DEBUG_DEVFS_ INIT < cycle )115 if( DEBUG_DEVFS_GLOBAL_INIT < cycle ) 116 116 printk("\n[%s] thread[%x,%x] created <dev> inode / cycle %d\n", 117 117 __FUNCTION__, this->process->pid, this->trdid, cycle ); … … 134 134 assert( (error == 0) , "cannot create <external>\n" ); 135 135 136 #if DEBUG_DEVFS_ INIT137 cycle = (uint32_t)hal_get_cycles(); 138 if( DEBUG_DEVFS_ INIT < cycle )136 #if DEBUG_DEVFS_GLOBAL_INIT 137 cycle = (uint32_t)hal_get_cycles(); 138 if( DEBUG_DEVFS_GLOBAL_INIT < cycle ) 139 139 printk("\n[%s] thread[%x,%x] created <external> inode / cycle %d\n", 140 140 __FUNCTION__, this->process->pid, this->trdid, cycle ); … … 159 159 error_t error; 160 160 161 #if DEBUG_DEVFS_LOCAL_INIT 162 uint32_t cycle = (uint32_t)hal_get_cycles(); 163 thread_t * this = CURRENT_THREAD; 164 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 165 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 166 __FUNCTION__, this->process->pid, this->trdid, cycle ); 167 #endif 168 161 169 // create "internal" directory 162 170 snprintf( node_name , 16 , "internal_%x" , local_cxy ); … … 177 185 assert( (error == 0) , "cannot create <external>\n" ); 178 186 179 #if DEBUG_DEVFS_INIT 180 uint32_t cycle = (uint32_t)hal_get_cycles(); 181 thread_t * this = CURRENT_THREAD; 182 if( DEBUG_DEVFS_INIT < cycle ) 187 #if DEBUG_DEVFS_LOCAL_INIT 188 cycle = (uint32_t)hal_get_cycles(); 189 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 183 190 printk("\n[%s] thread[%x,%x] created <%s> inode in cluster %x / cycle %d\n", 184 191 __FUNCTION__, this->process->pid, this->trdid, node_name, local_cxy, cycle ); … … 209 216 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 210 217 211 #if DEBUG_DEVFS_ INIT212 cycle = (uint32_t)hal_get_cycles(); 213 if( DEBUG_DEVFS_ INIT < cycle )218 #if DEBUG_DEVFS_LOCAL_INIT 219 cycle = (uint32_t)hal_get_cycles(); 220 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 214 221 printk("\n[%s] thread[%x,%x] created <mmc> inode in cluster %x\n", 215 222 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); … … 244 251 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 245 252 246 #if DEBUG_DEVFS_ INIT247 cycle = (uint32_t)hal_get_cycles(); 248 if( DEBUG_DEVFS_ INIT < cycle )253 #if DEBUG_DEVFS_LOCAL_INIT 254 cycle = (uint32_t)hal_get_cycles(); 255 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 249 256 printk("\n[%s] thread [%x,%x] created <dma[%d]> inode in cluster %x\n", 250 257 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 277 284 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 278 285 279 #if DEBUG_DEVFS_ INIT280 cycle = (uint32_t)hal_get_cycles(); 281 if( DEBUG_DEVFS_ INIT < cycle )286 #if DEBUG_DEVFS_LOCAL_INIT 287 cycle = (uint32_t)hal_get_cycles(); 288 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 282 289 printk("\n[%s] thread[%x,%x] created <iob> inode in cluster %x\n", 283 290 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); … … 310 317 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 311 318 312 #if DEBUG_DEVFS_ INIT313 cycle = (uint32_t)hal_get_cycles(); 314 if( DEBUG_DEVFS_ INIT < cycle )319 #if DEBUG_DEVFS_LOCAL_INIT 320 cycle = (uint32_t)hal_get_cycles(); 321 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 315 322 printk("\n[%s] thread[%x,%x] created <pic> inode in cluster %x\n", 316 
323 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); … … 345 352 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 346 353 347 #if DEBUG_DEVFS_ INIT348 cycle = (uint32_t)hal_get_cycles(); 349 if( DEBUG_DEVFS_ INIT < cycle )354 #if DEBUG_DEVFS_LOCAL_INIT 355 cycle = (uint32_t)hal_get_cycles(); 356 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 350 357 printk("\n[%s] thread[%x,%x] created <txt_rx[%d]> inode in cluster %x\n", 351 358 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 381 388 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 382 389 383 #if DEBUG_DEVFS_ INIT384 cycle = (uint32_t)hal_get_cycles(); 385 if( DEBUG_DEVFS_ INIT < cycle )390 #if DEBUG_DEVFS_LOCAL_INIT 391 cycle = (uint32_t)hal_get_cycles(); 392 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 386 393 printk("\n[%s] thread[%x,%x] created <txt_tx[%d]> inode in cluster %x\n", 387 394 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 417 424 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 418 425 419 #if DEBUG_DEVFS_ INIT420 cycle = (uint32_t)hal_get_cycles(); 421 if( DEBUG_DEVFS_ INIT < cycle )426 #if DEBUG_DEVFS_LOCAL_INIT 427 cycle = (uint32_t)hal_get_cycles(); 428 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 422 429 printk("\n[%s] thread[%x,%x] created <ioc[%d]> inode in cluster %x\n", 423 430 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 453 460 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 454 461 455 #if DEBUG_DEVFS_ INIT456 cycle = (uint32_t)hal_get_cycles(); 457 if( DEBUG_DEVFS_ INIT < cycle )462 #if DEBUG_DEVFS_LOCAL_INIT 463 cycle = (uint32_t)hal_get_cycles(); 464 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 458 465 printk("\n[%s] thread[%x,%x] created <fbf[%d]> inode in cluster %x\n", 459 466 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 489 496 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 490 497 491 #if DEBUG_DEVFS_ INIT492 cycle = (uint32_t)hal_get_cycles(); 493 if( DEBUG_DEVFS_ INIT < cycle )498 #if DEBUG_DEVFS_LOCAL_INIT 499 cycle = (uint32_t)hal_get_cycles(); 500 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 494 501 printk("\n[%s] thread[%x,%x] created <nic_rx[%d]> inode in cluster %x\n", 495 502 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); … … 525 532 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr ); 526 533 527 #if DEBUG_DEVFS_ INIT528 cycle = (uint32_t)hal_get_cycles(); 529 if( DEBUG_DEVFS_ INIT < cycle )534 #if DEBUG_DEVFS_LOCAL_INIT 535 cycle = (uint32_t)hal_get_cycles(); 536 if( DEBUG_DEVFS_LOCAL_INIT < cycle ) 530 537 printk("\n[%s] thread[%x,%x] created <nic_tx[%d]> inode in cluster %x\n", 531 538 __FUNCTION__, this->process->pid, this->trdid, channel, local_cxy, cycle ); -
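All the tracing above follows one idiom: the compile-time flag both enables the code and serves as a cycle threshold, so activity before the threshold stays silent. A self-contained user-space model of the idiom, with printf standing in for printk and a fake cycle counter:

    #include <stdio.h>
    #include <stdint.h>

    #define DEBUG_DEVFS_LOCAL_INIT 1000   /* trace only after cycle 1000 */

    static uint32_t hal_get_cycles( void ) { static uint32_t c = 0; return c += 700; }

    static void devfs_local_init( void )
    {
    #if DEBUG_DEVFS_LOCAL_INIT
        uint32_t cycle = hal_get_cycles();
        if( DEBUG_DEVFS_LOCAL_INIT < cycle )
            printf( "[%s] enter / cycle %u\n", __FUNCTION__, cycle );
    #endif
        /* ... create internal_<cxy> directory and chdev inodes ... */
    }

    int main( void )
    {
        devfs_local_init();   /* cycle 700  : silent */
        devfs_local_init();   /* cycle 1400 : traced */
        return 0;
    }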
trunk/kernel/fs/fatfs.c
r612 r614 1064 1064 { 1065 1065 // copy the modified page to IOC device 1066 fatfs_move_page( page_xp , false);1066 fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1067 1067 1068 1068 // get the next page in FAT mapper … … 1217 1217 1218 1218 // copy the modified page to the IOC device 1219 fatfs_move_page( page_xp , false);1219 fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1220 1220 1221 1221 #if DEBUG_FATFS_ADD_DENTRY 1222 1222 cycle = (uint32_t)hal_get_cycles(); 1223 1223 if( DEBUG_FATFS_ADD_DENTRY < cycle ) 1224 printk("\n[%s] thread[%x,%x] exit / parent %s / child %s/ cycle %d\n",1224 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n", 1225 1225 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle ); 1226 1226 #endif … … 1313 1313 1314 1314 // copy the modified page to the IOC device 1315 fatfs_move_page( page_xp , false);1315 fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1316 1316 1317 1317 // get extended pointer on page descriptor from parent directory mapper … … 1345 1345 1346 1346 // copy the modified page to the IOC device 1347 fatfs_move_page( page_xp , false);1347 fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1348 1348 1349 1349 … … 1396 1396 xptr_t base_xp; // extended pointer on page base 1397 1397 uint8_t * base; // local pointer on page base 1398 uint 32_tattr; // directory entry ATTR field1399 uint 32_tord; // directory entry ORD field1398 uint8_t attr; // directory entry ATTR field 1399 uint8_t ord; // directory entry ORD field 1400 1400 uint32_t seq; // sequence index 1401 1401 uint32_t lfn = 0; // LFN entries number … … 1422 1422 #if (DEBUG_FATFS_GET_DENTRY & 0x1) 1423 1423 if( DEBUG_FATFS_GET_DENTRY < cycle ) 1424 mapper_display_page( mapper_xp , page_id , 256 , parent_name);1424 mapper_display_page( mapper_xp , page_id , 256 ); 1425 1425 #endif 1426 1426 // scan this page until end of directory, end of page, or name found … … 1435 1435 } 1436 1436 else if ( ord == FREE_ENTRY ) // free entry => skip 1437 { 1438 offset = offset + 32; 1439 } 1440 else if ( attr == 0x28 ) // volune_id => skip 1437 1441 { 1438 1442 offset = offset + 32; … … 1577 1581 assert( (detailed == false), "detailed argument not supported/n"); 1578 1582 1579 char cname[CONFIG_VFS_MAX_NAME_LENGTH]; // name extracte r from each directoryentry1583 char cname[CONFIG_VFS_MAX_NAME_LENGTH]; // name extracted from each dentry 1580 1584 1581 1585 char lfn1[16]; // buffer for one partial cname … … 1585 1589 xptr_t base_xp; // extended pointer on page base 1586 1590 uint8_t * base; // local pointer on page base 1587 uint 32_tattr; // directory entry ATTR field1588 uint 32_tord; // directory entry ORD field1591 uint8_t attr; // directory entry ATTR field 1592 uint8_t ord; // directory entry ORD field 1589 1593 uint32_t seq; // sequence index 1590 1594 uint32_t lfn = 0; // LFN entries number … … 1609 1613 #if (DEBUG_FATFS_GET_USER_DIR & 0x1) 1610 1614 if( DEBUG_FATFS_GET_USER_DIR < cycle ) 1611 mapper_display_page( mapper_xp , page_id , 256 , inode_name);1615 mapper_display_page( mapper_xp , page_id , 256 ); 1612 1616 #endif 1613 1617 // loop on NORMAL/LFN (32 bytes) directory entries in this page … … 1625 1629 } 1626 1630 else if ( ord == FREE_ENTRY ) // free entry => skip 1631 { 1632 offset = offset + 32; 1633 } 1634 else if ( attr == 0x28 ) // volune_id => skip 1627 1635 { 1628 1636 offset = offset + 32; … … 1693 1701 if( DEBUG_FATFS_GET_USER_DIR < cycle ) 1694 1702 printk("\n[%s] thread[%x,%x] exit for inode <%s> / %d entries / cycle %d\n", 1695 __FUNCTION__, 
this->process->pid, this->trdid, inode_name, entries, cycle );1703 __FUNCTION__, this->process->pid, this->trdid, inode_name, dirent_id, cycle ); 1696 1704 #endif 1697 1705 … … 1756 1764 1757 1765 // move page from mapper to device 1758 error = fatfs_move_page( page_xp , false);1766 error = fatfs_move_page( page_xp , IOC_WRITE ); 1759 1767 1760 1768 if ( error ) return -1; … … 1827 1835 #endif 1828 1836 // move page from mapper to device 1829 error = fatfs_move_page( page_xp , false);1837 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 1830 1838 1831 1839 if ( error ) return -1; … … 2132 2140 } // end fatfs_release_inode() 2133 2141 2134 ///////////////////////////////////////// 2135 error_t fatfs_move_page( xptr_t page_xp,2136 bool_t to_mapper)2142 //////////////////////////////////////////// 2143 error_t fatfs_move_page( xptr_t page_xp, 2144 cmd_type_t cmd_type ) 2137 2145 { 2138 2146 error_t error; … … 2172 2180 2173 2181 // access device 2174 if( to_mapper ) error = dev_ioc_sync_read ( buffer , lba , 8 ); 2175 else error = dev_ioc_write( buffer , lba , 8 ); 2182 if ( cmd_type == IOC_SYNC_READ ) error = dev_ioc_sync_read ( buffer , lba , 8 ); 2183 else if( cmd_type == IOC_SYNC_WRITE ) error = dev_ioc_sync_write( buffer , lba , 8 ); 2184 else if( cmd_type == IOC_READ ) error = dev_ioc_read ( buffer , lba , 8 ); 2185 else if( cmd_type == IOC_WRITE ) error = dev_ioc_write ( buffer , lba , 8 ); 2186 else error = -1; 2176 2187 2177 2188 if( error ) return EIO; … … 2179 2190 #if (DEBUG_FATFS_MOVE_PAGE & 0x1) 2180 2191 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2181 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , "FAT");2192 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id ); 2182 2193 #endif 2183 2194 … … 2186 2197 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2187 2198 { 2188 if ( to_mapper)2199 if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) ) 2189 2200 printk("\n[%s] thread[%x,%x] load page %d of FAT / cycle %d\n", 2190 2201 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); … … 2230 2241 2231 2242 // access device 2232 if( to_mapper ) error = dev_ioc_sync_read ( buffer , lba , 8 ); 2233 else error = dev_ioc_write( buffer , lba , 8 ); 2243 if ( cmd_type == IOC_SYNC_READ ) error = dev_ioc_sync_read ( buffer , lba , 8 ); 2244 else if( cmd_type == IOC_SYNC_WRITE ) error = dev_ioc_sync_write( buffer , lba , 8 ); 2245 else if( cmd_type == IOC_READ ) error = dev_ioc_read ( buffer , lba , 8 ); 2246 else if( cmd_type == IOC_WRITE ) error = dev_ioc_write ( buffer , lba , 8 ); 2247 else error = -1; 2234 2248 2235 2249 if( error ) return EIO; … … 2237 2251 #if (DEBUG_FATFS_MOVE_PAGE & 0x1) 2238 2252 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2239 char string[CONFIG_VFS_MAX_NAME_LENGTH]; 2240 vfs_inode_get_name( XPTR(page_cxy , inode_ptr) , string ); 2241 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , string ); 2253 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id ); 2242 2254 #endif 2243 2255 … … 2246 2258 if(DEBUG_FATFS_MOVE_PAGE < cycle) 2247 2259 { 2248 if (to_mapper)2260 if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) ) 2249 2261 printk("\n[%s] thread[%x,%x] load page %d of <%s> inode / cycle %d\n", 2250 2262 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle ); -
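The new fatfs_move_page() reduces to a four-way dispatch on cmd_type, with a fixed count of 8 blocks because one 4-Kbyte page holds eight 512-byte blocks. Condensed from the diff into a stand-alone sketch (driver prototypes only, not linkable outside the kernel):

    #include <stdint.h>

    typedef int error_t;
    typedef enum { IOC_READ, IOC_WRITE, IOC_SYNC_READ, IOC_SYNC_WRITE } cmd_type_t;

    error_t dev_ioc_read      ( uint8_t * buf, uint32_t lba, uint32_t count );
    error_t dev_ioc_write     ( uint8_t * buf, uint32_t lba, uint32_t count );
    error_t dev_ioc_sync_read ( uint8_t * buf, uint32_t lba, uint32_t count );
    error_t dev_ioc_sync_write( uint8_t * buf, uint32_t lba, uint32_t count );

    /* move one 4K page (8 blocks of 512 bytes) to/from the IOC device */
    static error_t move_page( uint8_t * buffer, uint32_t lba, cmd_type_t cmd_type )
    {
        if     ( cmd_type == IOC_SYNC_READ  ) return dev_ioc_sync_read ( buffer , lba , 8 );
        else if( cmd_type == IOC_SYNC_WRITE ) return dev_ioc_sync_write( buffer , lba , 8 );
        else if( cmd_type == IOC_READ       ) return dev_ioc_read      ( buffer , lba , 8 );
        else if( cmd_type == IOC_WRITE      ) return dev_ioc_write     ( buffer , lba , 8 );
        else                                  return -1;
    }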
trunk/kernel/fs/fatfs.h
r612 r614 29 29 #include <remote_queuelock.h> 30 30 #include <vfs.h> 31 #include <dev_ioc.h> 31 32 32 33 … … 365 366 * and copies from mapper to device each page marked as dirty. 366 367 * WARNING : The target <inode> cannot be a directory, because all modifications in a 367 * directory *are synchronously done on the IOC device by the two fatfs_add_dentry()368 * directory are synchronously done on the IOC device by the two fatfs_add_dentry() 368 369 * and fatfs_remove_dentry() functions. 369 370 ***************************************************************************************** … … 451 452 ***************************************************************************************** 452 453 * @ page_xp : extended pointer on page descriptor. 453 * @ to_mapper : true for device->mapper / false for mapper->device454 * @ cmd_type : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE 454 455 * @ return 0 if success / return EIO if error during device access. 455 456 ****************************************************************************************/ 456 error_t fatfs_move_page( xptr_t page_xp,457 bool_t to_mapper);457 error_t fatfs_move_page( xptr_t page_xp, 458 cmd_type_t cmd_type ); 458 459 459 460 -
trunk/kernel/fs/vfs.c
r612 r614 424 424 uint32_t cycle = (uint32_t)hal_get_cycles(); 425 425 if( DEBUG_VFS_DENTRY_CREATE < cycle ) 426 printk("\n[%s] thread[%x,%x] enter for <%s> / parent_inode %x /cycle %d\n",427 __FUNCTION__, this->process->pid, this->trdid, name, parent,cycle );426 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", 427 __FUNCTION__, this->process->pid, this->trdid, name, cycle ); 428 428 #endif 429 429 … … 1946 1946 1947 1947 // display inode 1948 nolock_printk("%s %s <%s> : inum %d / %d bytes / dirty %d / cxy %x (inode %x / mapper %x)\n",1949 indent_str[indent], vfs_inode_type_str( inode_type ), name,1948 nolock_printk("%s<%s> : %s / inum %d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n", 1949 indent_str[indent], name, vfs_inode_type_str( inode_type ), 1950 1950 inode_inum, inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr ); 1951 1951 … … 3191 3191 ////////////////////////////////////////////////////////////////////////////////////////// 3192 3192 3193 /////////////////////////////////////////// 3194 error_t vfs_fs_move_page( xptr_t page_xp,3195 bool_t to_mapper)3193 ////////////////////////////////////////////// 3194 error_t vfs_fs_move_page( xptr_t page_xp, 3195 cmd_type_t cmd_type ) 3196 3196 { 3197 3197 error_t error = 0; … … 3213 3213 if( fs_type == FS_TYPE_FATFS ) 3214 3214 { 3215 error = fatfs_move_page( page_xp , to_mapper);3215 error = fatfs_move_page( page_xp , cmd_type ); 3216 3216 } 3217 3217 else if( fs_type == FS_TYPE_RAMFS ) -
trunk/kernel/fs/vfs.h
r612 r614 41 41 #include <ramfs.h> 42 42 #include <devfs.h> 43 #include <dev_ioc.h> 43 44 44 45 /**** Forward declarations ***/ … … 408 409 * It must called by a local thread. Use the RPC_DENTRY_CREATE if client thread is remote. 409 410 ****************************************************************************************** 410 * @ fs_type : file system type.411 * @ name : directory entry file/dir name.411 * @ fs_type : [in] file system type. 412 * @ name : [in] directory entry file/dir name. 412 413 * @ dentry_xp : [out] buffer for extended pointer on created dentry. 413 414 * @ return 0 if success / return ENOMEM or EINVAL if error. … … 421 422 * allocated to the dentry descriptor. 422 423 * It must be executed by a thread running in the cluster containing the dentry. 423 * Use the rpc_vfs_dentry_destroy_client() functionif required.424 * Use the RPC_DENTRY_DESTROY if required. 424 425 ****************************************************************************************** 425 * @ dentry : local pointer on dentry descriptor.426 * @ dentry : [in] local pointer on dentry descriptor. 426 427 *****************************************************************************************/ 427 428 void vfs_dentry_destroy( vfs_dentry_t * dentry ); … … 875 876 876 877 /****************************************************************************************** 878 * This function makes the I/O operation to move one page identified by the <page_xp> 879 * argument to/from the IOC device from/to the mapper, as defined by <cmd_type>. 880 * Depending on the file system type, it calls the proper, FS specific function. 881 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must 882 * be updated in the File System. 883 * The mapper pointer is obtained from the page descriptor. 884 * It can be executed by any thread running in any cluster. 885 * This function does NOT take any lock. 886 ****************************************************************************************** 887 * @ page_xp : extended pointer on page descriptor (for mapper and page_id). 888 * @ cmd_type : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE 889 * @ returns 0 if success / return -1 if device access failure. 890 *****************************************************************************************/ 891 error_t vfs_fs_move_page( xptr_t page_xp, 892 cmd_type_t cmd_type ); 893 894 /****************************************************************************************** 877 895 * This function updates the mapper associated to a directory inode identified by the 878 896 * <parent> argument, to add a new entry identified by the <dentry> argument. 
… … 1032 1050 error_t vfs_fs_release_inode( xptr_t inode_xp ); 1033 1051 1034 /******************************************************************************************1035 * This function makes the I/O operation to move one page identified by the <page_xp>1036 * argument to/from the IOC device from/to the mapper, as defined by <to_mapper>.1037 * Depending on the file system type, it calls the proper, FS specific function.1038 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must1039 * be updated in the File System.1040 * The mapper pointer is obtained from the page descriptor.1041 * It can be executed by any thread running in any cluster.1042 * This function does NOT take any lock.1043 ******************************************************************************************1044 * @ page_xp : extended pointer on the page descriptor.1045 * @ to_mapper : transfer direction.1046 * @ returns 0 if success / return -1 if device access failure.1047 *****************************************************************************************/1048 error_t vfs_fs_move_page( xptr_t page_xp,1049 bool_t to_mapper );1050 1051 1052 1052 1053 #endif /* _VFS_H_ */ -
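vfs_fs_move_page() now forwards the IOC command type unchanged to the FS-specific function instead of collapsing it into a to_mapper boolean. A reduced sketch of that dispatch, assuming only the FATFS case is implemented, as in the diff:

    #include <stdint.h>

    typedef int      error_t;
    typedef uint64_t xptr_t;
    typedef enum { IOC_READ, IOC_WRITE, IOC_SYNC_READ, IOC_SYNC_WRITE } cmd_type_t;
    typedef enum { FS_TYPE_FATFS, FS_TYPE_RAMFS } fs_type_t;

    error_t fatfs_move_page( xptr_t page_xp, cmd_type_t cmd_type );

    static error_t fs_move_page( fs_type_t fs_type, xptr_t page_xp, cmd_type_t cmd_type )
    {
        if( fs_type == FS_TYPE_FATFS ) return fatfs_move_page( page_xp , cmd_type );
        else                           return -1;   /* RAMFS pages never hit a device */
    }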
trunk/kernel/kern/kernel_init.c
r612 r614 1113 1113 vfs_root_inode_xp = XPTR_NULL; 1114 1114 1115 // File System must be FATFS in this implementation,1116 // butother File System can be introduced here1115 // Only FATFS is supported yet, 1116 // other File System can be introduced here 1117 1117 if( CONFIG_VFS_ROOT_IS_FATFS ) 1118 1118 { … … 1172 1172 } 1173 1173 1174 // create the <.> and <..> dentries in VFS root directory 1175 // the VFS root parent inode is the VFS root inode itself 1176 vfs_add_special_dentries( vfs_root_inode_xp, 1177 vfs_root_inode_xp ); 1178 1174 1179 // register VFS root inode in process_zero descriptor of cluster 0 1175 1180 process_zero.vfs_root_xp = vfs_root_inode_xp; … … 1255 1260 #if DEBUG_KERNEL_INIT 1256 1261 if( (core_lid == 0) & (local_cxy == 1) ) 1257 printk("\n[%s] : exit barrier 4: VFS root (%x,%x) in cluster 1 / cycle %d\n",1262 printk("\n[%s] : exit barrier 5 : VFS root (%x,%x) in cluster 1 / cycle %d\n", 1258 1263 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1259 1264 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); -
trunk/kernel/kern/rpc.c
r612 r614 54 54 &rpc_undefined, // 2 unused slot 55 55 &rpc_process_make_fork_server, // 3 56 &rpc_u ndefined, // 4 unused slot57 &rpc_u ndefined, // 5 unused slot56 &rpc_user_dir_create_server, // 4 57 &rpc_user_dir_destroy_server, // 5 58 58 &rpc_thread_user_create_server, // 6 59 59 &rpc_thread_kernel_create_server, // 7 … … 90 90 "undefined", // 2 91 91 "PROCESS_MAKE_FORK", // 3 92 " undefined",// 493 " undefined",// 592 "USER_DIR_CREATE", // 4 93 "USER_DIR_DESTROY", // 5 94 94 "THREAD_USER_CREATE", // 6 95 95 "THREAD_KERNEL_CREATE", // 7 … … 657 657 void rpc_user_dir_create_client( cxy_t cxy, 658 658 vfs_inode_t * inode, 659 xptr_t ref_xp, 659 660 user_dir_t ** dir ) 660 661 { … … 677 678 // set input arguments in RPC descriptor 678 679 rpc.args[0] = (uint64_t)(intptr_t)inode; 680 rpc.args[1] = (uint64_t)ref_xp; 679 681 680 682 // register RPC request in remote RPC fifo … … 682 684 683 685 // get output argument from RPC descriptor 684 *dir = (user_dir_t *)(intptr_t)rpc.args[ 1];686 *dir = (user_dir_t *)(intptr_t)rpc.args[2]; 685 687 686 688 #if DEBUG_RPC_USER_DIR_CREATE … … 704 706 705 707 vfs_inode_t * inode; // pointer on inode in server cluster 708 xptr_t ref_xp; // extended pointer on reference user process 706 709 user_dir_t * dir; // pointer on user_dir structure in server cluster 707 710 … … 711 714 712 715 // get input argument from RPC descriptor 713 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 716 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 717 ref_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 714 718 715 719 // call kernel function 716 dir = user_dir_create( inode );720 dir = user_dir_create( inode , ref_xp ); 717 721 718 722 // set output argument into RPC descriptor 719 hal_remote_s64( XPTR( client_cxy , &desc->args[ 1] ) , (intptr_t)dir );723 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (intptr_t)dir ); 720 724 721 725 #if DEBUG_RPC_USER_DIR_CREATE … … 733 737 //////////////////////////////////////////////////// 734 738 void rpc_user_dir_destroy_client( cxy_t cxy, 735 user_dir_t * dir ) 739 user_dir_t * dir, 740 xptr_t ref_xp ) 736 741 { 737 742 #if DEBUG_RPC_USER_DIR_DESTROY … … 753 758 // set input arguments in RPC descriptor 754 759 rpc.args[0] = (uint64_t)(intptr_t)dir; 760 rpc.args[1] = (uint64_t)ref_xp; 755 761 756 762 // register RPC request in remote RPC fifo … … 777 783 778 784 user_dir_t * dir; // pointer on user_dir structure in server cluster 785 xptr_t ref_xp; // extended pointer on reference process 779 786 780 787 // get client cluster identifier and pointer on RPC descriptor … … 783 790 784 791 // get input argument from RPC descriptor 785 dir = (user_dir_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 792 dir = (user_dir_t *)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 793 ref_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 786 794 787 795 // call kernel function 788 user_dir_destroy( dir );796 user_dir_destroy( dir , ref_xp ); 789 797 790 798 #if DEBUG_RPC_USER_DIR_DESTROY -
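Both user_dir RPCs follow the usual marshalling discipline: each input is widened into a 64-bit args[] slot on the client side, and the server writes its result into the next slot (args[2] for USER_DIR_CREATE). A reduced model of that packing; rpc_desc_t is simplified here to just the argument array:

    #include <stdint.h>

    typedef uint64_t xptr_t;
    typedef struct { uint64_t args[3]; } rpc_desc_t;   /* reduced descriptor */

    /* client side: pack the inode local pointer and the reference process xptr */
    static void pack_user_dir_create( rpc_desc_t * rpc, void * inode, xptr_t ref_xp )
    {
        rpc->args[0] = (uint64_t)(intptr_t)inode;   /* local pointer, widened */
        rpc->args[1] = (uint64_t)ref_xp;            /* already 64-bit         */
    }

    /* client side, after completion: the server stored the user_dir_t pointer */
    static void * unpack_user_dir_create( rpc_desc_t * rpc )
    {
        return (void *)(intptr_t)rpc->args[2];
    }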
trunk/kernel/kern/rpc.h
r612 r614 229 229 * [4] The RPC_USER_DIR_CREATE allows a client thread to create an user_dir_t 230 230 * structure and the associated array of dirents in a remote cluster containing 231 * the target directory inode. It is called by the sys_opendir() function. 231 * the target directory <inode>. It creates an ANON vseg in the user reference 232 * process VMM identified by the <ref_xp>. This reference cluster cluster can be 233 * different from both the client and server clusters. 234 * It is called by the sys_opendir() function. 232 235 *********************************************************************************** 233 236 * @ cxy : server cluster identifier. 234 237 * @ inode : [in] local pointer on inode in server cluster. 238 * @ ref_xp : [in] extended pointer on user reference process descriptor. 235 239 * @ dir : [out] local pointer on created user_dir structure. 236 240 **********************************************************************************/ 237 241 void rpc_user_dir_create_client( cxy_t cxy, 238 242 struct vfs_inode_s * inode, 243 xptr_t ref_xp, 239 244 struct user_dir_s ** dir ); 240 245 … … 248 253 * @ cxy : server cluster identifier. 249 254 * @ dir : [in] local pointer on created user_dir structure. 255 * @ ref_xp : [in] extended pointer on user reference process descriptor. 250 256 **********************************************************************************/ 251 257 void rpc_user_dir_destroy_client( cxy_t cxy, 252 struct user_dir_s * dir ); 258 struct user_dir_s * dir, 259 xptr_t ref_xp ); 253 260 254 261 void rpc_user_dir_destroy_server( xptr_t xp ); -
trunk/kernel/kern/scheduler.c
r610 r614
487 487
488 488    #if (DEBUG_SCHED_YIELD & 0x1)
489        if( sched->trace ) sched_display( lid );
    489    if( sched->trace )
    490    sched_display( lid );
490 491    #endif
491 492
492        // This assert should never be false, as this check must be
493        // done before by any function that can possibly deschedule...
    493    // This assert should never be false, as this check has been
    494    // done before, by any function that can possibly deschedule...
494 495    assert( (current->busylocks == 0),
495 496    "unexpected descheduling of thread holding %d busylocks = %d\n", current->busylocks );
trunk/kernel/kernel_config.h
r612 r614 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 62 62 #define DEBUG_DEV_PIC 0 63 63 64 #define DEBUG_DEVFS_INIT 1 64 #define DEBUG_DEVFS_GLOBAL_INIT 0 65 #define DEBUG_DEVFS_LOCAL_INIT 0 65 66 #define DEBUG_DEVFS_MOVE 0 66 67 … … 78 79 #define DEBUG_FATFS_FREE_CLUSTERS 0 79 80 #define DEBUG_FATFS_GET_CLUSTER 0 80 #define DEBUG_FATFS_GET_D IRENT 181 #define DEBUG_FATFS_GET_USER_DIR 181 #define DEBUG_FATFS_GET_DENTRY 0 82 #define DEBUG_FATFS_GET_USER_DIR 0 82 83 #define DEBUG_FATFS_MOVE_PAGE 0 83 84 #define DEBUG_FATFS_RELEASE_INODE 0 … … 212 213 #define DEBUG_THREAD_USER_EXEC 0 213 214 214 #define DEBUG_USER_DIR 1215 #define DEBUG_USER_DIR 0 215 216 216 217 #define DEBUG_VFS_ADD_CHILD 0 217 #define DEBUG_VFS_ADD_SPECIAL 1218 #define DEBUG_VFS_ADD_SPECIAL 0 218 219 #define DEBUG_VFS_CHDIR 0 219 220 #define DEBUG_VFS_CLOSE 0 … … 229 230 #define DEBUG_VFS_NEW_CHILD_INIT 0 230 231 #define DEBUG_VFS_OPEN 0 232 #define DEBUG_VFS_OPENDIR 0 231 233 #define DEBUG_VFS_STAT 0 232 234 #define DEBUG_VFS_UNLINK 0 … … 241 243 #define DEBUG_VMM_HANDLE_COW 0 242 244 #define DEBUG_VMM_INIT 0 245 #define DEBUG_VMM_MMAP_ALLOC 0 243 246 #define DEBUG_VMM_PAGE_ALLOCATE 0 244 247 #define DEBUG_VMM_SET_COW 0 … … 356 359 #define CONFIG_VFS_ROOT_IS_EX2FS 0 // root FS is EX2FS if non zero 357 360 361 #define CONFIG_MAPPER_GRDXT_W1 7 // number of bits for RADIX_TREE_IX1 362 #define CONFIG_MAPPER_GRDXT_W2 7 // number of bits for RADIX_TREE_IX2 363 #define CONFIG_MAPPER_GRDXT_W3 6 // number of bits for RADIX_TREE_IX3 364 358 365 //////////////////////////////////////////////////////////////////////////////////////////// 359 366 // DQDT … … 394 401 #define CONFIG_REMOTE_FIFO_MAX_ITERATIONS 1024 395 402 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core 396 397 ////////////////////////////////////////////////////////////////////////////////////////////398 // MAPPER399 ////////////////////////////////////////////////////////////////////////////////////////////400 401 #define CONFIG_MAPPER_MAX_FRAGMENTS 10 // max number of fragments moved402 #define CONFIG_MAPPER_MIN CONFIG_VFS_INODE_MIN403 #define CONFIG_MAPPER_MAX CONFIG_VFS_INODE_MAX404 #define CONFIG_MAPPER_GRDXT_W1 7 // number of bits for RADIX_TREE_IX1405 #define CONFIG_MAPPER_GRDXT_W2 7 // number of bits for RADIX_TREE_IX2406 #define CONFIG_MAPPER_GRDXT_W3 6 // number of bits for RADIX_TREE_IX3407 403 408 404 //////////////////////////////////////////////////////////////////////////////////////////// -
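The three CONFIG_MAPPER_GRDXT widths moved into the VFS section describe how a 20-bit page index (7 + 7 + 6) is split across the three radix-tree levels, bounding a mapper at 2^20 pages, i.e. 4 Gbytes per file with 4-Kbyte pages. A worked split, assuming ix1 takes the most significant bits (the usual convention, not shown in this diff):

    #include <stdio.h>
    #include <stdint.h>

    #define W1 7
    #define W2 7
    #define W3 6   /* W1 + W2 + W3 = 20 bits => 1M pages => 4 GB with 4K pages */

    int main( void )
    {
        uint32_t page_id = 0x12345;                         /* any 20-bit index */
        uint32_t ix1 = page_id >> (W2 + W3);                /* top 7 bits       */
        uint32_t ix2 = (page_id >> W3) & ((1 << W2) - 1);   /* middle 7 bits    */
        uint32_t ix3 = page_id & ((1 << W3) - 1);           /* bottom 6 bits    */
        printf( "ix1 = %u / ix2 = %u / ix3 = %u\n", ix1, ix2, ix3 );
        return 0;
    }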
trunk/kernel/libk/user_dir.c
r613 r614 80 80 } // end user_dir_from_ident() 81 81 82 /////////////////////////////////////////////////// 83 user_dir_t * user_dir_create( vfs_inode_t * inode ) 82 ////////////////////////////////////////////////// 83 user_dir_t * user_dir_create( vfs_inode_t * inode, 84 xptr_t ref_xp ) 84 85 { 85 86 user_dir_t * dir; // local pointer on created user_dir_t 86 87 vseg_t * vseg; // local pointer on dirent array vseg 87 88 uint32_t vseg_size; // size of vseg in bytes 88 process_t * process; // local pointer on calling process89 xptr_t ref_xp; // extended pointer on reference process90 89 process_t * ref_ptr; // local pointer on reference process 91 90 cxy_t ref_cxy; // reference process cluster identifier 91 pid_t ref_pid; // reference process PID 92 92 xptr_t gpt_xp; // extended pointer on reference process GPT 93 93 uint32_t gpt_attributes; // attributes for all mapped gpt entries … … 109 109 error_t error; 110 110 111 // get pointer on local process descriptor 112 process = CURRENT_THREAD->process; 111 // get cluster, local pointer, and pid of reference user process 112 ref_cxy = GET_CXY( ref_xp ); 113 ref_ptr = GET_PTR( ref_xp ); 114 ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); 113 115 114 116 #if DEBUG_USER_DIR … … 116 118 thread_t * this = CURRENT_THREAD; 117 119 if( cycle > DEBUG_USER_DIR ) 118 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n",119 __FUNCTION__, process->pid, this->trdid, local_cxy, inode, cycle );120 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n", 121 __FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle ); 120 122 #endif 121 123 … … 128 130 // initialise temporary list of pages 129 131 list_root_init( &root ); 130 131 // get pointers on reference process132 ref_xp = process->ref_xp;133 ref_cxy = GET_CXY( ref_xp );134 ref_ptr = GET_PTR( ref_xp );135 132 136 133 // allocate memory for a local user_dir descriptor … … 207 204 } // end while 208 205 206 #if DEBUG_USER_DIR 207 if( cycle > DEBUG_USER_DIR ) 208 printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n", 209 __FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle ); 210 #endif 211 209 212 // compute required vseg size for a 64 bytes dirent 210 213 vseg_size = total_dirents << 6; … … 213 216 if( local_cxy == ref_cxy ) 214 217 { 215 vseg = vmm_create_vseg( process,218 vseg = vmm_create_vseg( ref_ptr, 216 219 VSEG_TYPE_ANON, 217 220 0, // vseg base (unused) … … 220 223 0, // file_size (unused) 221 224 XPTR_NULL, // mapper (unused) 222 ref_cxy );225 local_cxy ); 223 226 } 224 227 else … … 232 235 0, // file size (unused) 233 236 XPTR_NULL, // mapper (unused) 234 ref_cxy,237 local_cxy, 235 238 &vseg ); 236 239 } 240 237 241 if( vseg == NULL ) 238 242 { 239 printk("\n[ERROR] in %s : cannot create vseg for DIRin cluster %x\n",243 printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n", 240 244 __FUNCTION__, ref_cxy); 241 245 goto user_dir_create_failure; 242 246 } 243 247 244 #if (DEBUG_USER_DIR & 1)248 #if DEBUG_USER_DIR 245 249 if( cycle > DEBUG_USER_DIR ) 246 250 printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n", 247 __FUNCTION__, process->pid, this->trdid, vseg->min, vseg->max - vseg->min );251 __FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min ); 248 252 #endif 249 253 … … 289 293 desc.lid = CURRENT_THREAD->core->lid; 290 294 desc.blocking = true; 291 desc.args[0] = process->pid;295 desc.args[0] = ref_pid; 292 296 desc.args[1] 
= vpn << CONFIG_PPM_PAGE_SHIFT; 293 297 rpc_vmm_delete_vseg_client( ref_cxy , &desc ); … … 299 303 } 300 304 301 #if (DEBUG_USER_DIR & 1)305 #if DEBUG_USER_DIR 302 306 if( cycle > DEBUG_USER_DIR ) 303 307 printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n", 304 __FUNCTION__, process->pid, this->trdid, vpn + page_id, ppn );308 __FUNCTION__, this->process->pid, this->trdid, vpn + page_id, ppn ); 305 309 #endif 306 310 … … 340 344 if( cycle > DEBUG_USER_DIR ) 341 345 printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n", 342 __FUNCTION__, process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );346 __FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle ); 343 347 #endif 344 348 … … 365 369 } // end user_dir_create() 366 370 367 ///////////////////////////////////////// 368 void user_dir_destroy( user_dir_t * dir ) 371 //////////////////////////////////////// 372 void user_dir_destroy( user_dir_t * dir, 373 xptr_t ref_xp ) 369 374 { 370 process_t * process; // local pointer on client process371 thread_t * this; // local pointer on client thread375 thread_t * this; // local pointer on calling thread 376 process_t * process; // local pointer on calling process 372 377 cluster_t * cluster; // local pointer on local cluster 373 378 intptr_t ident; // user pointer on dirent array 374 xptr_t ref_ xp; // extended pointer on reference process379 xptr_t ref_pid; // reference process PID 375 380 cxy_t ref_cxy; // reference process cluster identifier 376 381 process_t * ref_ptr; // local pointer on reference process … … 379 384 xptr_t iter_xp; // iteratot in xlist 380 385 reg_t save_sr; // for critical section 381 pid_t pid; // process descriptor382 386 cxy_t owner_cxy; // owner process cluster 383 387 lpid_t lpid; // process local index 384 388 rpc_desc_t rpc; // rpc descriptor 385 389 386 // get pointers on c lientprocess & thread390 // get pointers on calling process & thread 387 391 this = CURRENT_THREAD; 388 392 process = this->process; 389 393 cluster = LOCAL_CLUSTER; 390 394 395 // get cluster, local pointer, and PID of reference user process 396 ref_cxy = GET_CXY( ref_xp ); 397 ref_ptr = GET_PTR( ref_xp ); 398 ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); 399 391 400 #if DEBUG_USER_DIR 392 401 uint32_t cycle = (uint32_t)hal_get_cycles(); 393 402 if( cycle > DEBUG_USER_DIR ) 394 printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) / cycle %d\n",395 __FUNCTION__, process->pid, this->trdid, local_cxy, dir, cycle );403 printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n", 404 __FUNCTION__, process->pid, this->trdid, local_cxy, dir, ref_pid, cycle ); 396 405 #endif 397 406 398 407 // get user pointer on dirent array 399 408 ident = dir->ident; 400 401 // get pointers on reference process402 ref_xp = process->ref_xp;403 ref_cxy = GET_CXY( ref_xp );404 ref_ptr = GET_PTR( ref_xp );405 409 406 410 // build extended pointer on lock protecting open directories list … … 424 428 425 429 // get owner cluster identifier and process lpid 426 pid = process->pid; 427 owner_cxy = CXY_FROM_PID( pid ); 428 lpid = LPID_FROM_PID( pid ); 430 owner_cxy = CXY_FROM_PID( ref_pid ); 431 lpid = LPID_FROM_PID( ref_pid ); 429 432 430 433 // get root of list of copies and lock from owner cluster … … 444 447 rpc.thread = this; 445 448 rpc.lid = this->core->lid; 446 rpc.args[0] = process->pid;449 rpc.args[0] = ref_pid; 447 450 rpc.args[1] = ident; 448 451 -
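The vseg size computed by user_dir_create() is total_dirents << 6, since each dirent occupies 64 bytes; the number of physical pages to allocate and map then follows by rounding up to the page size. A worked example, assuming CONFIG_PPM_PAGE_SHIFT is 12 (4-Kbyte pages):

    #include <stdio.h>
    #include <stdint.h>

    #define DIRENT_SHIFT 6    /* one dirent = 64 bytes, hence "<< 6"      */
    #define PAGE_SHIFT   12   /* assumed CONFIG_PPM_PAGE_SHIFT (4K pages) */

    int main( void )
    {
        uint32_t total_dirents = 100;
        uint32_t vseg_size = total_dirents << DIRENT_SHIFT;   /* 6400 bytes */
        uint32_t npages    = (vseg_size + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
        printf( "%u dirents -> %u bytes -> %u page(s)\n",
                total_dirents, vseg_size, npages );
        return 0;
    }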
trunk/kernel/libk/user_dir.h
r613 r614 70 70 * in the reference process descriptor. 71 71 ***************************************************************************************** 72 * @ ident : DIR virtual address, used as identifier.72 * @ ident : [in] DIR virtual address, used as identifier. 73 73 * @ returns extended pointer on user_dir_t if success / returns XPTR_NULL if not found. 74 74 ****************************************************************************************/ … … 77 77 /***************************************************************************************** 78 78 * This function allocates memory and initializes a user_dir_t structure in the cluster 79 * containing the directory inode identified by the <inode> argument. 79 * containing the directory inode identified by the <inode> argument and map the 80 * user accessible dirent array in the reference user process VMM, identified by the 81 * <ref_xp> argument. 80 82 * It must be executed by a thread running in the cluster containing the target inode. 81 83 * Use the RPC_USER_DIR_CREATE when the client thread is remote. 82 84 * It makes the following actions: 83 * - the allocation of one user_dir_t descriptor in reference cluster.85 * - the allocation of one user_dir_t descriptor in the directory inode cluster. 84 86 * - the allocation of one or several physical pages in reference cluster to store 85 87 * all directory entries in an array of 64 bytes dirent structures, 86 88 * - the initialisation of this array from informations found in the Inode Tree. 87 * - the creation of an user accessible vseg containing this dirent array, and the88 * mapping of allrelevant physical pages in this vseg.89 * - the creation of an ANON vseg containing this dirent array in reference process VMM, 90 * and the mapping of the relevant physical pages in this vseg. 89 91 * - the registration of the created user_dir_t structure in the xlist rooted 90 92 * in the reference process, 91 93 * It returns a local pointer on the created user_dir_t structure. 92 94 ***************************************************************************************** 93 * @ inode : local pointer on the directory inode. 95 * @ inode : [in] local pointer on the directory inode. 96 * @ ref_xp : [in] extended pointer on the reference user process descriptor. 94 97 * @ return local pointer on user_dir_t if success / return XPTR_NULL if failure. 95 98 ****************************************************************************************/ 96 user_dir_t * user_dir_create( struct vfs_inode_s * inode ); 99 user_dir_t * user_dir_create( struct vfs_inode_s * inode, 100 xptr_t ref_xp ); 97 101 98 102 /***************************************************************************************** 99 103 * This function removes a user_dir_t structure from the xlist of user_dir_t 100 * structures rooted in the reference process descriptor, and release all memory 101 * allocated for the user_dir_t struct in the directory inode cluster, 102 * including the dirent array. 104 * structures rooted in the reference process descriptor, release all memory 105 * allocated for the user_dir_t struct in the directory inode cluster, including 106 * the dirent array, and delete all ANON vseg copies in all process VMM copies, 107 * using parallel RPCs. 103 108 * It must be executed by a thread running in the cluster containing the target inode. 104 109 * Use the RPC_USER_DIR_DESTROY when the client thread is remote. 
105 110 ***************************************************************************************** 106 * @ dir : local pointer on user_dir_t structure. 111 * @ dir : [in] local pointer on user_dir_t structure. 112 * @ ref_xp : [in] extended pointer on the reference user process descriptor. 107 113 ****************************************************************************************/ 108 void user_dir_destroy( struct user_dir_s * dir ); 114 void user_dir_destroy( struct user_dir_s * dir, 115 xptr_t ref_xp ); 109 116 110 117 -
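Taken together, the create/destroy prototypes imply the usual local-or-RPC caller pattern: sys_opendir() can call user_dir_create() directly when the directory inode is local, and must use the RPC otherwise. A hedged sketch of that pattern; open_dir() and its argument list are illustrative, not a kernel function:

    #include <stdint.h>

    typedef uint64_t xptr_t;
    typedef uint32_t cxy_t;

    struct vfs_inode_s;
    struct user_dir_s;

    struct user_dir_s * user_dir_create( struct vfs_inode_s * inode, xptr_t ref_xp );
    void rpc_user_dir_create_client( cxy_t cxy, struct vfs_inode_s * inode,
                                     xptr_t ref_xp, struct user_dir_s ** dir );

    static struct user_dir_s * open_dir( cxy_t local_cxy, cxy_t inode_cxy,
                                         struct vfs_inode_s * inode, xptr_t ref_xp )
    {
        struct user_dir_s * dir;
        if( inode_cxy == local_cxy ) dir = user_dir_create( inode , ref_xp );
        else rpc_user_dir_create_client( inode_cxy , inode , ref_xp , &dir );
        return dir;   /* NULL on failure */
    }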
trunk/kernel/libk/xhtab.c
r612 r614 42 42 // XHTAB_DENTRY_TYPE 43 43 // This functions compute the hash index from the key, that is the directory entry name. 44 // In this implementation, the index value is simply the ASCII code of the first 45 // character, to provide an approximate lexicographic order. 44 46 /////////////////////////////////////////////////////////////////////////////////////////// 45 47 // @ key : local pointer on name. … … 49 51 { 50 52 char * name = key; 51 uint32_t index = 0; 53 54 return (name[0] % XHASHTAB_SIZE); 55 /* 56 uint32_t index = 0; 52 57 while( *name ) 53 58 { … … 55 60 } 56 61 return index % XHASHTAB_SIZE; 62 */ 63 57 64 } 58 65 -
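With XHASHTAB_SIZE raised to 128 and directory-entry names being 7-bit ASCII, the modulo is effectively a no-op, so the index is simply the first character of the name; that is what produces the approximate lexicographic bucket order. A stand-alone demonstration:

    #include <stdio.h>

    #define XHASHTAB_SIZE 128   /* matches the new value in xhtab.h */

    /* new index function: ASCII code of the first character */
    static unsigned index_from_name( const char * name )
    {
        return (unsigned)( name[0] % XHASHTAB_SIZE );
    }

    int main( void )
    {
        printf( "abc  -> %u\n", index_from_name( "abc" ) );    /* 'a' = 97    */
        printf( "adam -> %u\n", index_from_name( "adam" ) );   /* same bucket */
        printf( "zoo  -> %u\n", index_from_name( "zoo" ) );    /* 'z' = 122   */
        return 0;
    }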
trunk/kernel/libk/xhtab.h
r611 r614
61 61    ///////////////////////////////////////////////////////////////////////////////////////////
62 62
63       #define XHASHTAB_SIZE    8      // number of subsets
    63   #define XHASHTAB_SIZE    128    // number of subsets
64 64
65 65    /******************************************************************************************
trunk/kernel/mm/mapper.c
r611 r614 28 28 #include <hal_uspace.h> 29 29 #include <grdxt.h> 30 #include <string.h> 30 31 #include <rwlock.h> 31 32 #include <printk.h> … … 41 42 #include <vfs.h> 42 43 #include <mapper.h> 44 #include <dev_ioc.h> 43 45 44 46 … … 302 304 303 305 // launch I/O operation to load page from device to mapper 304 error = vfs_fs_move_page( XPTR( local_cxy , page ) , true);306 error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ ); 305 307 306 308 if( error ) … … 647 649 error_t mapper_display_page( xptr_t mapper_xp, 648 650 uint32_t page_id, 649 uint32_t nbytes, 650 char * string ) 651 { 652 xptr_t page_xp; // extended pointer on page descriptor 653 xptr_t base_xp; // extended pointer on page base 654 char buffer[4096]; // local buffer 655 uint32_t * tab; // pointer on uint32_t to scan the buffer 656 uint32_t line; // line index 657 uint32_t word; // word index 651 uint32_t nbytes ) 652 { 653 xptr_t page_xp; // extended pointer on page descriptor 654 xptr_t base_xp; // extended pointer on page base 655 char buffer[4096]; // local buffer 656 uint32_t * tabi; // pointer on uint32_t to scan buffer 657 char * tabc; // pointer on char to scan buffer 658 uint32_t line; // line index 659 uint32_t word; // word index 660 uint32_t n; // char index 661 cxy_t mapper_cxy; // mapper cluster identifier 662 mapper_t * mapper_ptr; // mapper local pointer 663 vfs_inode_t * inode_ptr; // inode local pointer 664 665 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 658 666 659 667 if( nbytes > 4096) … … 674 682 } 675 683 684 // get cluster and local pointer 685 mapper_cxy = GET_CXY( mapper_xp ); 686 mapper_ptr = GET_PTR( mapper_xp ); 687 688 // get inode 689 inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 690 691 // get inode name 692 if( inode_ptr == NULL ) strcpy( name , "fat" ); 693 else vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name ); 694 676 695 // get extended pointer on page base 677 696 base_xp = ppm_page2base( page_xp ); … … 681 700 682 701 // display 8 words per line 683 tab = (uint32_t *)buffer; 684 printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id ); 702 tabi = (uint32_t *)buffer; 703 tabc = (char *)buffer; 704 printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id ); 685 705 for( line = 0 ; line < (nbytes >> 5) ; line++ ) 686 706 { 687 707 printk("%X : ", line ); 688 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] ); 708 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] ); 709 printk(" | "); 710 for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] ); 689 711 printk("\n"); 690 712 } -
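The reworked dump prints each 32-byte line twice: first as eight 32-bit words, then as raw characters. A user-space model of that loop, with a printable-character guard added for terminals (the kernel version prints the bytes unfiltered):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static void dump_lines( const uint8_t * buf, uint32_t nbytes )
    {
        const uint32_t * tabi = (const uint32_t *)buf;
        const char     * tabc = (const char *)buf;
        for( uint32_t line = 0 ; line < (nbytes >> 5) ; line++ )
        {
            printf( "%X : ", line );
            for( uint32_t word = 0 ; word < 8 ; word++ )
                printf( "%08X ", tabi[(line << 3) + word] );
            printf( " | " );
            for( uint32_t n = 0 ; n < 32 ; n++ )
            {
                char c = tabc[(line << 5) + n];
                putchar( (c >= 32 && c < 127) ? c : '.' );
            }
            putchar( '\n' );
        }
    }

    int main( void )
    {
        union { uint32_t w[16]; uint8_t b[64]; } page = { .w = { 0 } };
        strcpy( (char *)page.b, "hello mapper" );
        dump_lines( page.b, sizeof(page.b) );
        return 0;
    }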
trunk/kernel/mm/mapper.h
r612 r614 38 38 39 39 /******************************************************************************************* 40 * Th e mapperimplements the kernel cache for a given VFS file or directory.40 * This mapper_t object implements the kernel cache for a given VFS file or directory. 41 41 * There is one mapper per file/dir. It is implemented as a three levels radix tree, 42 42 * entirely stored in the same cluster as the inode representing the file/dir. … … 63 63 * 64 64 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c 65 * and mapper.h file should be errotrandfered to the vfs directory.65 * and mapper.h file should be trandfered to the vfs directory. 66 66 ******************************************************************************************/ 67 67 … … 85 85 86 86 /******************************************************************************************* 87 * This structure defines a "fragment". It is used to move data between the kernel mapper,88 * and an user buffer, that can be split in several distributed physical pages located89 * in different clusters. A fragment is a set of contiguous bytes in the file.90 * - It can be stored in one single physical page in the user buffer.91 * - It can spread two successive physical pages in the kernel mapper.92 ******************************************************************************************/93 94 typedef struct fragment_s95 {96 uint32_t file_offset; /*! offset of fragment in file (i.e. in mapper) */97 uint32_t size; /*! number of bytes in fragment */98 cxy_t buf_cxy; /*! user buffer cluster identifier */99 void * buf_ptr; /*! local pointer on first byte in user buffer */100 }101 fragment_t;102 103 /*******************************************************************************************104 87 * This function allocates physical memory for a mapper descriptor, and initializes it 105 88 * (refcount <= 0) / inode <= NULL). … … 158 141 * returns O if success / returns -1 if error. 159 142 ******************************************************************************************/ 160 error_t mapper_move_user( xptr_t mappe _xp,143 error_t mapper_move_user( xptr_t mapper_xp, 161 144 bool_t to_buffer, 162 145 uint32_t file_offset, … … 258 241 * @ page_id : [in] page index in file. 259 242 * @ nbytes : [in] value to be written. 260 * @ string : [in] string printed in header.261 243 * @ returns 0 if success / return -1 if error. 262 244 ******************************************************************************************/ 263 245 error_t mapper_display_page( xptr_t mapper_xp, 264 246 uint32_t page_id, 265 uint32_t nbytes, 266 char * string ); 247 uint32_t nbytes ); 267 248 268 249 -
trunk/kernel/mm/vmm.c
r611 r614  
64 64        intptr_t  base;
65 65        intptr_t  size;
66           uint32_t  i;
66 67    
67 68    #if DEBUG_VMM_INIT
… …  
69 70    uint32_t cycle = (uint32_t)hal_get_cycles();
70 71    if( DEBUG_VMM_INIT )
71       printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
72       __FUNCTION__ , this->process->pid, this->trdid, process->pid , cycle );
72       printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
73       __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
73 74    #endif
74 75    
… …  
183 184      vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
184 185      busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
185      
186          uint32_t  i;
187 186      for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
188 187  
… …  
195 194  cycle = (uint32_t)hal_get_cycles();
196 195  if( DEBUG_VMM_INIT )
197      printk("\n[%s] thread[%x,%x] exit / process %x / entry_point %x / cycle %d\n",
198      __FUNCTION__, this->process->pid, this->trdid, process->pid, process->vmm.entry_point, cycle );
196      printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
197      __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
199 198  #endif
200 199  
… …  
944 943      vpn_t     free;
945 944  
946          // mmap vseg size must be power of 2
945      #if DEBUG_VMM_MMAP_ALLOC
946      thread_t * this = CURRENT_THREAD;
947      uint32_t cycle = (uint32_t)hal_get_cycles();
948      if( DEBUG_VMM_MMAP_ALLOC < cycle )
949      printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
950      __FUNCTION__, this->process->pid, this->trdid, cycle );
951      #endif
952      
953          // vseg size must be power of 2
947 954      // compute actual size and index in zombi_list array
948 955      size  = POW2_ROUNDUP( npages );
… …  
952 959      mmap_mgr_t * mgr = &vmm->mmap_mgr;
953 960  
961      printk("\n@@@ in %s : size = %d / index = %d / first = %d / empty = %d\n",
962      __FUNCTION__, size, index, mgr->vpn_size, list_is_empty(&mgr->zombi_list[index]) );
963      
954 964      // get lock on mmap allocator
955 965      busylock_acquire( &mgr->lock );
… …  
958 968      if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
959 969      {
970      
971      printk("\n@@@ from mmap zone\n" );
972      
960 973          // check overflow
961 974          free = mgr->first_free_vpn;
962              if( (free + size) > mgr->vpn_size ) return ENOMEM;
963      
964              // update STACK allocator
975              if( (free + size) > mgr->vpn_size ) return -1;
976      
977              // update MMAP allocator
965 978          mgr->first_free_vpn += size;
… …  
970 983      else                                               // from zombi_list
971 984      {
985      
986      printk("\n@@@ from zombi_list\n" );
987      
972 988          // get pointer on zombi vseg from zombi_list
973 989          vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
… …  
982 998      // release lock on mmap allocator
983 999      busylock_release( &mgr->lock );
1000     
1001     #if DEBUG_VMM_MMAP_ALLOC
1002     cycle = (uint32_t)hal_get_cycles();
1003     if( DEBUG_VMM_MMAP_ALLOC < cycle )
1004     printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
1005     __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
1006     #endif
984 1007  
985 1008     // returns vpn_base, vpn_size
… …  
1009 1032 uint32_t cycle = (uint32_t)hal_get_cycles();
1010 1033 if( DEBUG_VMM_CREATE_VSEG < cycle )
1011     printk("\n[%s] thread[%x,%x] enter / %s / cxy %x / cycle %d\n",
1012     __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
1034     printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n",
1035     __FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle );
1013 1036 #endif
1014 1037 
… …  
1092 1115     if( vseg != NULL )
1093 1116     {
1094     printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n"
1095     "  overlap existing vseg [vpn_base = %x / vpn_size = %x]\n",
1117     printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
1118     "  overlap existing vseg [vpn_base %x / vpn_size %x]\n",
1096 1119 __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
1097 1120         return NULL;
… …  
1106 1129         return NULL;
1107 1130     }
1131     
1132     #if DEBUG_VMM_CREATE_VSEG
1133     if( DEBUG_VMM_CREATE_VSEG < cycle )
1134     printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
1135     __FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
1136     #endif
1108 1137 
1109 1138     // initialize vseg descriptor
-
trunk/kernel/mm/vmm.h
r611 r614  
138 138   * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
139 139   * - For TSAR it maps all pages for the "kentry" vseg, that must be identity mapped.
140       * Note:
140       *********************************************************************************************
141       * Implementation notes:
141 142   * - The "code" and "data" vsegs are registered by the elf_load_process() function.
142 143   * - The "stack" vsegs are dynamically created by the thread_user_create() function.
-
trunk/kernel/syscalls/sys_closedir.c
r612 r614  
74 74        if( dir_cxy == local_cxy )
75 75        {
76               user_dir_destroy( dir_ptr );
76               user_dir_destroy( dir_ptr,
77                                 process->ref_xp );
77 78        }
78 79        else
79 80        {
80 81            rpc_user_dir_destroy_client( dir_cxy,
81                                            dir_ptr );
82                                            dir_ptr,
83                                            process->ref_xp );
82 84        }
83 85    
-
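The control flow above follows the recurring local-versus-remote dispatch idiom in these syscalls: when the target object lives in the local cluster the service is called directly, otherwise the same arguments are shipped to the owning cluster through an RPC stub. A schematic of that idiom; all type and function names below are hypothetical, not the ALMOS-MKH API:

{{{
#include <stdint.h>
#include <stdio.h>

typedef uint32_t cxy_t;                  /* cluster identifier            */
static cxy_t local_cxy = 0;              /* cluster of the calling thread */

/* the actual service, which must run in the cluster owning the object */
static void dir_destroy_service( void * dir_ptr, uint64_t ref_xp )
{
    printf("destroy dir %p (ref_xp %llx)\n", dir_ptr, (unsigned long long)ref_xp);
}

/* hypothetical RPC stub: ships the arguments to a server thread in dir_cxy */
static void rpc_dir_destroy( cxy_t dir_cxy, void * dir_ptr, uint64_t ref_xp )
{
    printf("RPC to cluster %u : ", dir_cxy);
    dir_destroy_service( dir_ptr, ref_xp );   /* executed remotely in reality */
}

/* dispatch wrapper, mirroring the sys_closedir() structure */
static void dir_destroy( cxy_t dir_cxy, void * dir_ptr, uint64_t ref_xp )
{
    if( dir_cxy == local_cxy ) dir_destroy_service( dir_ptr, ref_xp );
    else                       rpc_dir_destroy( dir_cxy, dir_ptr, ref_xp );
}

int main( void )
{
    int dummy;
    dir_destroy( 0, &dummy, 0x42 );   /* local cluster : direct call */
    dir_destroy( 3, &dummy, 0x42 );   /* remote cluster : RPC path   */
    return 0;
}
}}}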
trunk/kernel/syscalls/sys_display.c
r612 r614  
357 357  
358 358      // display mapper
359          error = mapper_display_page( mapper_xp , page_id , nbytes , kbuf );
359          error = mapper_display_page( mapper_xp , page_id , nbytes );
360 360  
361 361      if( error )
-
trunk/kernel/syscalls/sys_opendir.c
r612 r614  
143 143      }
144 144  
145          // create a new user_dir_t structure in inode cluster
146          // and get the user space pointer on dirent array
145          // create a new user_dir_t structure in target directory inode cluster,
146          // map it in the reference user process VMM (in a new ANON vseg),
147          // and get the local pointer on the created user_dir_t structure
147 148      if( inode_cxy == local_cxy )
148 149      {
149              dir_ptr = user_dir_create( inode_ptr );
150              dir_ptr = user_dir_create( inode_ptr,
151                                         process->ref_xp );
150 152      }
151 153      else
… …  
153 155          rpc_user_dir_create_client( inode_cxy,
154 156                                      inode_ptr,
157                                          process->ref_xp,
155 158                                      &dir_ptr );
156 159      }
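user_dir_create() now receives process->ref_xp because the dirent array is mapped into the reference process VMM (the ANON vseg named in the comment above), and the matching user_dir_destroy() needs the same reference to unmap it. The toy program below mirrors that create/iterate/destroy lifecycle, with plain malloc/free standing in for the vseg mapping; every name in it is illustrative, not the kernel API:

{{{
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct dirent_s  { char name[32]; } dirent_t;
typedef struct user_dir_s
{
    dirent_t * ents;      /* in the kernel: mapped in the reference process VMM */
    uint32_t   entries;   /* number of entries                                  */
    uint32_t   current;   /* readdir cursor                                     */
} user_dir_t;

static user_dir_t * user_dir_create_toy( const char ** names, uint32_t n )
{
    user_dir_t * dir = malloc( sizeof(user_dir_t) );
    if( dir == NULL ) return NULL;
    dir->ents = malloc( n * sizeof(dirent_t) );    /* stands in for the ANON vseg */
    if( dir->ents == NULL ) { free( dir ); return NULL; }
    for( uint32_t i = 0 ; i < n ; i++ )
    {
        strncpy( dir->ents[i].name, names[i], sizeof(dir->ents[i].name) - 1 );
        dir->ents[i].name[sizeof(dir->ents[i].name) - 1] = '\0';
    }
    dir->entries = n;
    dir->current = 0;
    return dir;
}

static void user_dir_destroy_toy( user_dir_t * dir )
{
    free( dir->ents );   /* in the kernel: unmap the vseg in the reference process */
    free( dir );
}

int main( void )
{
    const char * names[] = { ".", "..", "readme.txt" };
    user_dir_t * dir = user_dir_create_toy( names, 3 );
    if( dir == NULL ) return 1;
    while( dir->current < dir->entries )
        printf("%s\n", dir->ents[dir->current++].name);
    user_dir_destroy_toy( dir );
    return 0;
}
}}}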